/*
 * Copyright (C) 2006-2009 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Shlomi Gridish <gridish@freescale.com>
 *	   Li Yang <leoli@freescale.com>
 *
 * Description:
 * QE UCC Gigabit Ethernet Driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>

#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
#include <asm/ucc.h>
#include <asm/ucc_fast.h>
#include <asm/machdep.h>

#include "ucc_geth.h"
#include "fsl_pq_mdio.h"

#undef DEBUG

#define ugeth_printk(level, format, arg...)  \
	printk(level format "\n", ## arg)

#define ugeth_dbg(format, arg...)            \
	ugeth_printk(KERN_DEBUG , format , ## arg)
#define ugeth_err(format, arg...)            \
	ugeth_printk(KERN_ERR , format , ## arg)
#define ugeth_info(format, arg...)           \
	ugeth_printk(KERN_INFO , format , ## arg)
#define ugeth_warn(format, arg...)           \
	ugeth_printk(KERN_WARNING , format , ## arg)

#ifdef UGETH_VERBOSE_DEBUG
#define ugeth_vdbg ugeth_dbg
#else
#define ugeth_vdbg(fmt, args...) \
do { } while (0) 63#endif /* UGETH_VERBOSE_DEBUG */ 64#define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1 65 66 67static DEFINE_SPINLOCK(ugeth_lock); 68 69static struct { 70 u32 msg_enable; 71} debug = { -1 }; 72 73module_param_named(debug, debug.msg_enable, int, 0); 74MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)"); 75 76static struct ucc_geth_info ugeth_primary_info = { 77 .uf_info = { 78 .bd_mem_part = MEM_PART_SYSTEM, 79 .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES, 80 .max_rx_buf_length = 1536, 81 /* adjusted at startup if max-speed 1000 */ 82 .urfs = UCC_GETH_URFS_INIT, 83 .urfet = UCC_GETH_URFET_INIT, 84 .urfset = UCC_GETH_URFSET_INIT, 85 .utfs = UCC_GETH_UTFS_INIT, 86 .utfet = UCC_GETH_UTFET_INIT, 87 .utftt = UCC_GETH_UTFTT_INIT, 88 .ufpt = 256, 89 .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET, 90 .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL, 91 .tenc = UCC_FAST_TX_ENCODING_NRZ, 92 .renc = UCC_FAST_RX_ENCODING_NRZ, 93 .tcrc = UCC_FAST_16_BIT_CRC, 94 .synl = UCC_FAST_SYNC_LEN_NOT_USED, 95 }, 96 .numQueuesTx = 1, 97 .numQueuesRx = 1, 98 .extendedFilteringChainPointer = ((uint32_t) NULL), 99 .typeorlen = 3072 /*1536 */ , 100 .nonBackToBackIfgPart1 = 0x40, 101 .nonBackToBackIfgPart2 = 0x60, 102 .miminumInterFrameGapEnforcement = 0x50, 103 .backToBackInterFrameGap = 0x60, 104 .mblinterval = 128, 105 .nortsrbytetime = 5, 106 .fracsiz = 1, 107 .strictpriorityq = 0xff, 108 .altBebTruncation = 0xa, 109 .excessDefer = 1, 110 .maxRetransmission = 0xf, 111 .collisionWindow = 0x37, 112 .receiveFlowControl = 1, 113 .transmitFlowControl = 1, 114 .maxGroupAddrInHash = 4, 115 .maxIndAddrInHash = 4, 116 .prel = 7, 117 .maxFrameLength = 1518, 118 .minFrameLength = 64, 119 .maxD1Length = 1520, 120 .maxD2Length = 1520, 121 .vlantype = 0x8100, 122 .ecamptr = ((uint32_t) NULL), 123 .eventRegMask = UCCE_OTHER, 124 .pausePeriod = 0xf000, 125 .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1}, 126 .bdRingLenTx = { 127 TX_BD_RING_LEN, 128 TX_BD_RING_LEN, 129 TX_BD_RING_LEN, 130 TX_BD_RING_LEN, 131 TX_BD_RING_LEN, 132 TX_BD_RING_LEN, 133 TX_BD_RING_LEN, 134 TX_BD_RING_LEN}, 135 136 .bdRingLenRx = { 137 RX_BD_RING_LEN, 138 RX_BD_RING_LEN, 139 RX_BD_RING_LEN, 140 RX_BD_RING_LEN, 141 RX_BD_RING_LEN, 142 RX_BD_RING_LEN, 143 RX_BD_RING_LEN, 144 RX_BD_RING_LEN}, 145 146 .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1, 147 .largestexternallookupkeysize = 148 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE, 149 .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE | 150 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX | 151 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX, 152 .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP, 153 .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP, 154 .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT, 155 .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE, 156 .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC, 157 .numThreadsTx = UCC_GETH_NUM_OF_THREADS_1, 158 .numThreadsRx = UCC_GETH_NUM_OF_THREADS_1, 159 .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, 160 .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, 161}; 162 163static struct ucc_geth_info ugeth_info[8]; 164 165#ifdef DEBUG 166static void mem_disp(u8 *addr, int size) 167{ 168 u8 *i; 169 int size16Aling = (size >> 4) << 4; 170 int size4Aling = (size >> 2) << 2; 171 int notAlign = 0; 172 if (size % 16) 173 notAlign = 1; 174 175 for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16) 176 printk("0x%08x: %08x %08x %08x %08x\r\n", 177 (u32) i, 178 *((u32 *) (i)), 179 *((u32 
*) (i + 4)), 180 *((u32 *) (i + 8)), *((u32 *) (i + 12))); 181 if (notAlign == 1) 182 printk("0x%08x: ", (u32) i); 183 for (; (u32) i < (u32) addr + size4Aling; i += 4) 184 printk("%08x ", *((u32 *) (i))); 185 for (; (u32) i < (u32) addr + size; i++) 186 printk("%02x", *((u8 *) (i))); 187 if (notAlign == 1) 188 printk("\r\n"); 189} 190#endif /* DEBUG */ 191 192static struct list_head *dequeue(struct list_head *lh) 193{ 194 unsigned long flags; 195 196 spin_lock_irqsave(&ugeth_lock, flags); 197 if (!list_empty(lh)) { 198 struct list_head *node = lh->next; 199 list_del(node); 200 spin_unlock_irqrestore(&ugeth_lock, flags); 201 return node; 202 } else { 203 spin_unlock_irqrestore(&ugeth_lock, flags); 204 return NULL; 205 } 206} 207 208static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, 209 u8 __iomem *bd) 210{ 211 struct sk_buff *skb = NULL; 212 213 skb = __skb_dequeue(&ugeth->rx_recycle); 214 if (!skb) 215 skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length + 216 UCC_GETH_RX_DATA_BUF_ALIGNMENT); 217 if (skb == NULL) 218 return NULL; 219 220 /* We need the data buffer to be aligned properly. We will reserve 221 * as many bytes as needed to align the data properly 222 */ 223 skb_reserve(skb, 224 UCC_GETH_RX_DATA_BUF_ALIGNMENT - 225 (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT - 226 1))); 227 228 skb->dev = ugeth->ndev; 229 230 out_be32(&((struct qe_bd __iomem *)bd)->buf, 231 dma_map_single(ugeth->dev, 232 skb->data, 233 ugeth->ug_info->uf_info.max_rx_buf_length + 234 UCC_GETH_RX_DATA_BUF_ALIGNMENT, 235 DMA_FROM_DEVICE)); 236 237 out_be32((u32 __iomem *)bd, 238 (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W))); 239 240 return skb; 241} 242 243static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) 244{ 245 u8 __iomem *bd; 246 u32 bd_status; 247 struct sk_buff *skb; 248 int i; 249 250 bd = ugeth->p_rx_bd_ring[rxQ]; 251 i = 0; 252 253 do { 254 bd_status = in_be32((u32 __iomem *)bd); 255 skb = get_new_skb(ugeth, bd); 256 257 if (!skb) /* If can not allocate data buffer, 258 abort. 
Cleanup will be elsewhere */ 259 return -ENOMEM; 260 261 ugeth->rx_skbuff[rxQ][i] = skb; 262 263 /* advance the BD pointer */ 264 bd += sizeof(struct qe_bd); 265 i++; 266 } while (!(bd_status & R_W)); 267 268 return 0; 269} 270 271static int fill_init_enet_entries(struct ucc_geth_private *ugeth, 272 u32 *p_start, 273 u8 num_entries, 274 u32 thread_size, 275 u32 thread_alignment, 276 unsigned int risc, 277 int skip_page_for_first_entry) 278{ 279 u32 init_enet_offset; 280 u8 i; 281 int snum; 282 283 for (i = 0; i < num_entries; i++) { 284 if ((snum = qe_get_snum()) < 0) { 285 if (netif_msg_ifup(ugeth)) 286 ugeth_err("fill_init_enet_entries: Can not get SNUM."); 287 return snum; 288 } 289 if ((i == 0) && skip_page_for_first_entry) 290 /* First entry of Rx does not have page */ 291 init_enet_offset = 0; 292 else { 293 init_enet_offset = 294 qe_muram_alloc(thread_size, thread_alignment); 295 if (IS_ERR_VALUE(init_enet_offset)) { 296 if (netif_msg_ifup(ugeth)) 297 ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory."); 298 qe_put_snum((u8) snum); 299 return -ENOMEM; 300 } 301 } 302 *(p_start++) = 303 ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset 304 | risc; 305 } 306 307 return 0; 308} 309 310static int return_init_enet_entries(struct ucc_geth_private *ugeth, 311 u32 *p_start, 312 u8 num_entries, 313 unsigned int risc, 314 int skip_page_for_first_entry) 315{ 316 u32 init_enet_offset; 317 u8 i; 318 int snum; 319 320 for (i = 0; i < num_entries; i++) { 321 u32 val = *p_start; 322 323 /* Check that this entry was actually valid -- 324 needed in case failed in allocations */ 325 if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { 326 snum = 327 (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> 328 ENET_INIT_PARAM_SNUM_SHIFT; 329 qe_put_snum((u8) snum); 330 if (!((i == 0) && skip_page_for_first_entry)) { 331 /* First entry of Rx does not have page */ 332 init_enet_offset = 333 (val & ENET_INIT_PARAM_PTR_MASK); 334 qe_muram_free(init_enet_offset); 335 } 336 *p_start++ = 0; 337 } 338 } 339 340 return 0; 341} 342 343#ifdef DEBUG 344static int dump_init_enet_entries(struct ucc_geth_private *ugeth, 345 u32 __iomem *p_start, 346 u8 num_entries, 347 u32 thread_size, 348 unsigned int risc, 349 int skip_page_for_first_entry) 350{ 351 u32 init_enet_offset; 352 u8 i; 353 int snum; 354 355 for (i = 0; i < num_entries; i++) { 356 u32 val = in_be32(p_start); 357 358 /* Check that this entry was actually valid -- 359 needed in case failed in allocations */ 360 if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { 361 snum = 362 (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> 363 ENET_INIT_PARAM_SNUM_SHIFT; 364 qe_put_snum((u8) snum); 365 if (!((i == 0) && skip_page_for_first_entry)) { 366 /* First entry of Rx does not have page */ 367 init_enet_offset = 368 (in_be32(p_start) & 369 ENET_INIT_PARAM_PTR_MASK); 370 ugeth_info("Init enet entry %d:", i); 371 ugeth_info("Base address: 0x%08x", 372 (u32) 373 qe_muram_addr(init_enet_offset)); 374 mem_disp(qe_muram_addr(init_enet_offset), 375 thread_size); 376 } 377 p_start++; 378 } 379 } 380 381 return 0; 382} 383#endif 384 385static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont) 386{ 387 kfree(enet_addr_cont); 388} 389 390static void set_mac_addr(__be16 __iomem *reg, u8 *mac) 391{ 392 out_be16(®[0], ((u16)mac[5] << 8) | mac[4]); 393 out_be16(®[1], ((u16)mac[3] << 8) | mac[2]); 394 out_be16(®[2], ((u16)mac[1] << 8) | mac[0]); 395} 396 397static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) 398{ 399 struct 
ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;

	if (!(paddr_num < NUM_OF_PADDRS)) {
		ugeth_warn("%s: Illegal paddr_num.", __func__);
		return -EINVAL;
	}

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram __iomem *)
	    ugeth->p_rx_glbl_pram->addressfiltering;

	/* Writing address ff.ff.ff.ff.ff.ff disables address
	   recognition for this register */
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);

	return 0;
}

static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
				u8 *p_enet_addr)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
	u32 cecr_subblock;

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram __iomem *)
	    ugeth->p_rx_glbl_pram->addressfiltering;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);

	/* Ethernet frames are defined in Little Endian mode; therefore, to
	 * insert the address into the hash (Big Endian mode), we reverse
	 * the bytes. */
	set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);

	qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
		     QE_CR_PROTOCOL_ETHERNET, 0);
}

static inline int compare_addr(u8 **addr1, u8 **addr2)
{
	return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
}

#ifdef DEBUG
static void get_statistics(struct ucc_geth_private *ugeth,
			   struct ucc_geth_tx_firmware_statistics *
			   tx_firmware_statistics,
			   struct ucc_geth_rx_firmware_statistics *
			   rx_firmware_statistics,
			   struct ucc_geth_hardware_statistics *hardware_statistics)
{
	struct ucc_fast __iomem *uf_regs;
	struct ucc_geth __iomem *ug_regs;
	struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
	struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;

	ug_regs = ugeth->ug_regs;
	uf_regs = (struct ucc_fast __iomem *) ug_regs;
	p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
	p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;

	/* Tx firmware only if user handed pointer and driver actually
	   gathers Tx firmware statistics */
	if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
		tx_firmware_statistics->sicoltx =
		    in_be32(&p_tx_fw_statistics_pram->sicoltx);
		tx_firmware_statistics->mulcoltx =
		    in_be32(&p_tx_fw_statistics_pram->mulcoltx);
		tx_firmware_statistics->latecoltxfr =
		    in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
		tx_firmware_statistics->frabortduecol =
		    in_be32(&p_tx_fw_statistics_pram->frabortduecol);
		tx_firmware_statistics->frlostinmactxer =
		    in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
		tx_firmware_statistics->carriersenseertx =
		    in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
		tx_firmware_statistics->frtxok =
		    in_be32(&p_tx_fw_statistics_pram->frtxok);
		tx_firmware_statistics->txfrexcessivedefer =
		    in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
		tx_firmware_statistics->txpkts256 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts256);
		tx_firmware_statistics->txpkts512 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts512);
		tx_firmware_statistics->txpkts1024 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts1024);
		tx_firmware_statistics->txpktsjumbo =
		    in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
	}

	/* Rx firmware
only if user handed pointer and driver actually 495 * gathers Rx firmware statistics */ 496 if (rx_firmware_statistics && p_rx_fw_statistics_pram) { 497 int i; 498 rx_firmware_statistics->frrxfcser = 499 in_be32(&p_rx_fw_statistics_pram->frrxfcser); 500 rx_firmware_statistics->fraligner = 501 in_be32(&p_rx_fw_statistics_pram->fraligner); 502 rx_firmware_statistics->inrangelenrxer = 503 in_be32(&p_rx_fw_statistics_pram->inrangelenrxer); 504 rx_firmware_statistics->outrangelenrxer = 505 in_be32(&p_rx_fw_statistics_pram->outrangelenrxer); 506 rx_firmware_statistics->frtoolong = 507 in_be32(&p_rx_fw_statistics_pram->frtoolong); 508 rx_firmware_statistics->runt = 509 in_be32(&p_rx_fw_statistics_pram->runt); 510 rx_firmware_statistics->verylongevent = 511 in_be32(&p_rx_fw_statistics_pram->verylongevent); 512 rx_firmware_statistics->symbolerror = 513 in_be32(&p_rx_fw_statistics_pram->symbolerror); 514 rx_firmware_statistics->dropbsy = 515 in_be32(&p_rx_fw_statistics_pram->dropbsy); 516 for (i = 0; i < 0x8; i++) 517 rx_firmware_statistics->res0[i] = 518 p_rx_fw_statistics_pram->res0[i]; 519 rx_firmware_statistics->mismatchdrop = 520 in_be32(&p_rx_fw_statistics_pram->mismatchdrop); 521 rx_firmware_statistics->underpkts = 522 in_be32(&p_rx_fw_statistics_pram->underpkts); 523 rx_firmware_statistics->pkts256 = 524 in_be32(&p_rx_fw_statistics_pram->pkts256); 525 rx_firmware_statistics->pkts512 = 526 in_be32(&p_rx_fw_statistics_pram->pkts512); 527 rx_firmware_statistics->pkts1024 = 528 in_be32(&p_rx_fw_statistics_pram->pkts1024); 529 rx_firmware_statistics->pktsjumbo = 530 in_be32(&p_rx_fw_statistics_pram->pktsjumbo); 531 rx_firmware_statistics->frlossinmacer = 532 in_be32(&p_rx_fw_statistics_pram->frlossinmacer); 533 rx_firmware_statistics->pausefr = 534 in_be32(&p_rx_fw_statistics_pram->pausefr); 535 for (i = 0; i < 0x4; i++) 536 rx_firmware_statistics->res1[i] = 537 p_rx_fw_statistics_pram->res1[i]; 538 rx_firmware_statistics->removevlan = 539 in_be32(&p_rx_fw_statistics_pram->removevlan); 540 rx_firmware_statistics->replacevlan = 541 in_be32(&p_rx_fw_statistics_pram->replacevlan); 542 rx_firmware_statistics->insertvlan = 543 in_be32(&p_rx_fw_statistics_pram->insertvlan); 544 } 545 546 /* Hardware only if user handed pointer and driver actually 547 gathers hardware statistics */ 548 if (hardware_statistics && 549 (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) { 550 hardware_statistics->tx64 = in_be32(&ug_regs->tx64); 551 hardware_statistics->tx127 = in_be32(&ug_regs->tx127); 552 hardware_statistics->tx255 = in_be32(&ug_regs->tx255); 553 hardware_statistics->rx64 = in_be32(&ug_regs->rx64); 554 hardware_statistics->rx127 = in_be32(&ug_regs->rx127); 555 hardware_statistics->rx255 = in_be32(&ug_regs->rx255); 556 hardware_statistics->txok = in_be32(&ug_regs->txok); 557 hardware_statistics->txcf = in_be16(&ug_regs->txcf); 558 hardware_statistics->tmca = in_be32(&ug_regs->tmca); 559 hardware_statistics->tbca = in_be32(&ug_regs->tbca); 560 hardware_statistics->rxfok = in_be32(&ug_regs->rxfok); 561 hardware_statistics->rxbok = in_be32(&ug_regs->rxbok); 562 hardware_statistics->rbyt = in_be32(&ug_regs->rbyt); 563 hardware_statistics->rmca = in_be32(&ug_regs->rmca); 564 hardware_statistics->rbca = in_be32(&ug_regs->rbca); 565 } 566} 567 568static void dump_bds(struct ucc_geth_private *ugeth) 569{ 570 int i; 571 int length; 572 573 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { 574 if (ugeth->p_tx_bd_ring[i]) { 575 length = 576 (ugeth->ug_info->bdRingLenTx[i] * 577 sizeof(struct qe_bd)); 578 
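			/* Descriptive note (added): length now spans this
			 * queue's whole Tx ring, i.e. bdRingLenTx[i]
			 * descriptors of struct qe_bd (status/length word plus
			 * buffer pointer), which is dumped verbatim below. */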
ugeth_info("TX BDs[%d]", i); 579 mem_disp(ugeth->p_tx_bd_ring[i], length); 580 } 581 } 582 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 583 if (ugeth->p_rx_bd_ring[i]) { 584 length = 585 (ugeth->ug_info->bdRingLenRx[i] * 586 sizeof(struct qe_bd)); 587 ugeth_info("RX BDs[%d]", i); 588 mem_disp(ugeth->p_rx_bd_ring[i], length); 589 } 590 } 591} 592 593static void dump_regs(struct ucc_geth_private *ugeth) 594{ 595 int i; 596 597 ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num + 1); 598 ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs); 599 600 ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x", 601 (u32) & ugeth->ug_regs->maccfg1, 602 in_be32(&ugeth->ug_regs->maccfg1)); 603 ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x", 604 (u32) & ugeth->ug_regs->maccfg2, 605 in_be32(&ugeth->ug_regs->maccfg2)); 606 ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x", 607 (u32) & ugeth->ug_regs->ipgifg, 608 in_be32(&ugeth->ug_regs->ipgifg)); 609 ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x", 610 (u32) & ugeth->ug_regs->hafdup, 611 in_be32(&ugeth->ug_regs->hafdup)); 612 ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x", 613 (u32) & ugeth->ug_regs->ifctl, 614 in_be32(&ugeth->ug_regs->ifctl)); 615 ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x", 616 (u32) & ugeth->ug_regs->ifstat, 617 in_be32(&ugeth->ug_regs->ifstat)); 618 ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x", 619 (u32) & ugeth->ug_regs->macstnaddr1, 620 in_be32(&ugeth->ug_regs->macstnaddr1)); 621 ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x", 622 (u32) & ugeth->ug_regs->macstnaddr2, 623 in_be32(&ugeth->ug_regs->macstnaddr2)); 624 ugeth_info("uempr : addr - 0x%08x, val - 0x%08x", 625 (u32) & ugeth->ug_regs->uempr, 626 in_be32(&ugeth->ug_regs->uempr)); 627 ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x", 628 (u32) & ugeth->ug_regs->utbipar, 629 in_be32(&ugeth->ug_regs->utbipar)); 630 ugeth_info("uescr : addr - 0x%08x, val - 0x%04x", 631 (u32) & ugeth->ug_regs->uescr, 632 in_be16(&ugeth->ug_regs->uescr)); 633 ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x", 634 (u32) & ugeth->ug_regs->tx64, 635 in_be32(&ugeth->ug_regs->tx64)); 636 ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x", 637 (u32) & ugeth->ug_regs->tx127, 638 in_be32(&ugeth->ug_regs->tx127)); 639 ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x", 640 (u32) & ugeth->ug_regs->tx255, 641 in_be32(&ugeth->ug_regs->tx255)); 642 ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x", 643 (u32) & ugeth->ug_regs->rx64, 644 in_be32(&ugeth->ug_regs->rx64)); 645 ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x", 646 (u32) & ugeth->ug_regs->rx127, 647 in_be32(&ugeth->ug_regs->rx127)); 648 ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x", 649 (u32) & ugeth->ug_regs->rx255, 650 in_be32(&ugeth->ug_regs->rx255)); 651 ugeth_info("txok : addr - 0x%08x, val - 0x%08x", 652 (u32) & ugeth->ug_regs->txok, 653 in_be32(&ugeth->ug_regs->txok)); 654 ugeth_info("txcf : addr - 0x%08x, val - 0x%04x", 655 (u32) & ugeth->ug_regs->txcf, 656 in_be16(&ugeth->ug_regs->txcf)); 657 ugeth_info("tmca : addr - 0x%08x, val - 0x%08x", 658 (u32) & ugeth->ug_regs->tmca, 659 in_be32(&ugeth->ug_regs->tmca)); 660 ugeth_info("tbca : addr - 0x%08x, val - 0x%08x", 661 (u32) & ugeth->ug_regs->tbca, 662 in_be32(&ugeth->ug_regs->tbca)); 663 ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x", 664 (u32) & ugeth->ug_regs->rxfok, 665 in_be32(&ugeth->ug_regs->rxfok)); 666 ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x", 667 (u32) & ugeth->ug_regs->rxbok, 668 in_be32(&ugeth->ug_regs->rxbok)); 669 
ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x", 670 (u32) & ugeth->ug_regs->rbyt, 671 in_be32(&ugeth->ug_regs->rbyt)); 672 ugeth_info("rmca : addr - 0x%08x, val - 0x%08x", 673 (u32) & ugeth->ug_regs->rmca, 674 in_be32(&ugeth->ug_regs->rmca)); 675 ugeth_info("rbca : addr - 0x%08x, val - 0x%08x", 676 (u32) & ugeth->ug_regs->rbca, 677 in_be32(&ugeth->ug_regs->rbca)); 678 ugeth_info("scar : addr - 0x%08x, val - 0x%08x", 679 (u32) & ugeth->ug_regs->scar, 680 in_be32(&ugeth->ug_regs->scar)); 681 ugeth_info("scam : addr - 0x%08x, val - 0x%08x", 682 (u32) & ugeth->ug_regs->scam, 683 in_be32(&ugeth->ug_regs->scam)); 684 685 if (ugeth->p_thread_data_tx) { 686 int numThreadsTxNumerical; 687 switch (ugeth->ug_info->numThreadsTx) { 688 case UCC_GETH_NUM_OF_THREADS_1: 689 numThreadsTxNumerical = 1; 690 break; 691 case UCC_GETH_NUM_OF_THREADS_2: 692 numThreadsTxNumerical = 2; 693 break; 694 case UCC_GETH_NUM_OF_THREADS_4: 695 numThreadsTxNumerical = 4; 696 break; 697 case UCC_GETH_NUM_OF_THREADS_6: 698 numThreadsTxNumerical = 6; 699 break; 700 case UCC_GETH_NUM_OF_THREADS_8: 701 numThreadsTxNumerical = 8; 702 break; 703 default: 704 numThreadsTxNumerical = 0; 705 break; 706 } 707 708 ugeth_info("Thread data TXs:"); 709 ugeth_info("Base address: 0x%08x", 710 (u32) ugeth->p_thread_data_tx); 711 for (i = 0; i < numThreadsTxNumerical; i++) { 712 ugeth_info("Thread data TX[%d]:", i); 713 ugeth_info("Base address: 0x%08x", 714 (u32) & ugeth->p_thread_data_tx[i]); 715 mem_disp((u8 *) & ugeth->p_thread_data_tx[i], 716 sizeof(struct ucc_geth_thread_data_tx)); 717 } 718 } 719 if (ugeth->p_thread_data_rx) { 720 int numThreadsRxNumerical; 721 switch (ugeth->ug_info->numThreadsRx) { 722 case UCC_GETH_NUM_OF_THREADS_1: 723 numThreadsRxNumerical = 1; 724 break; 725 case UCC_GETH_NUM_OF_THREADS_2: 726 numThreadsRxNumerical = 2; 727 break; 728 case UCC_GETH_NUM_OF_THREADS_4: 729 numThreadsRxNumerical = 4; 730 break; 731 case UCC_GETH_NUM_OF_THREADS_6: 732 numThreadsRxNumerical = 6; 733 break; 734 case UCC_GETH_NUM_OF_THREADS_8: 735 numThreadsRxNumerical = 8; 736 break; 737 default: 738 numThreadsRxNumerical = 0; 739 break; 740 } 741 742 ugeth_info("Thread data RX:"); 743 ugeth_info("Base address: 0x%08x", 744 (u32) ugeth->p_thread_data_rx); 745 for (i = 0; i < numThreadsRxNumerical; i++) { 746 ugeth_info("Thread data RX[%d]:", i); 747 ugeth_info("Base address: 0x%08x", 748 (u32) & ugeth->p_thread_data_rx[i]); 749 mem_disp((u8 *) & ugeth->p_thread_data_rx[i], 750 sizeof(struct ucc_geth_thread_data_rx)); 751 } 752 } 753 if (ugeth->p_exf_glbl_param) { 754 ugeth_info("EXF global param:"); 755 ugeth_info("Base address: 0x%08x", 756 (u32) ugeth->p_exf_glbl_param); 757 mem_disp((u8 *) ugeth->p_exf_glbl_param, 758 sizeof(*ugeth->p_exf_glbl_param)); 759 } 760 if (ugeth->p_tx_glbl_pram) { 761 ugeth_info("TX global param:"); 762 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram); 763 ugeth_info("temoder : addr - 0x%08x, val - 0x%04x", 764 (u32) & ugeth->p_tx_glbl_pram->temoder, 765 in_be16(&ugeth->p_tx_glbl_pram->temoder)); 766 ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x", 767 (u32) & ugeth->p_tx_glbl_pram->sqptr, 768 in_be32(&ugeth->p_tx_glbl_pram->sqptr)); 769 ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x", 770 (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer, 771 in_be32(&ugeth->p_tx_glbl_pram-> 772 schedulerbasepointer)); 773 ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x", 774 (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr, 775 in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr)); 776 
ugeth_info("tstate : addr - 0x%08x, val - 0x%08x", 777 (u32) & ugeth->p_tx_glbl_pram->tstate, 778 in_be32(&ugeth->p_tx_glbl_pram->tstate)); 779 ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x", 780 (u32) & ugeth->p_tx_glbl_pram->iphoffset[0], 781 ugeth->p_tx_glbl_pram->iphoffset[0]); 782 ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x", 783 (u32) & ugeth->p_tx_glbl_pram->iphoffset[1], 784 ugeth->p_tx_glbl_pram->iphoffset[1]); 785 ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x", 786 (u32) & ugeth->p_tx_glbl_pram->iphoffset[2], 787 ugeth->p_tx_glbl_pram->iphoffset[2]); 788 ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x", 789 (u32) & ugeth->p_tx_glbl_pram->iphoffset[3], 790 ugeth->p_tx_glbl_pram->iphoffset[3]); 791 ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x", 792 (u32) & ugeth->p_tx_glbl_pram->iphoffset[4], 793 ugeth->p_tx_glbl_pram->iphoffset[4]); 794 ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x", 795 (u32) & ugeth->p_tx_glbl_pram->iphoffset[5], 796 ugeth->p_tx_glbl_pram->iphoffset[5]); 797 ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x", 798 (u32) & ugeth->p_tx_glbl_pram->iphoffset[6], 799 ugeth->p_tx_glbl_pram->iphoffset[6]); 800 ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x", 801 (u32) & ugeth->p_tx_glbl_pram->iphoffset[7], 802 ugeth->p_tx_glbl_pram->iphoffset[7]); 803 ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x", 804 (u32) & ugeth->p_tx_glbl_pram->vtagtable[0], 805 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0])); 806 ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x", 807 (u32) & ugeth->p_tx_glbl_pram->vtagtable[1], 808 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1])); 809 ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x", 810 (u32) & ugeth->p_tx_glbl_pram->vtagtable[2], 811 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2])); 812 ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x", 813 (u32) & ugeth->p_tx_glbl_pram->vtagtable[3], 814 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3])); 815 ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x", 816 (u32) & ugeth->p_tx_glbl_pram->vtagtable[4], 817 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4])); 818 ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x", 819 (u32) & ugeth->p_tx_glbl_pram->vtagtable[5], 820 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5])); 821 ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x", 822 (u32) & ugeth->p_tx_glbl_pram->vtagtable[6], 823 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6])); 824 ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x", 825 (u32) & ugeth->p_tx_glbl_pram->vtagtable[7], 826 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7])); 827 ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x", 828 (u32) & ugeth->p_tx_glbl_pram->tqptr, 829 in_be32(&ugeth->p_tx_glbl_pram->tqptr)); 830 } 831 if (ugeth->p_rx_glbl_pram) { 832 ugeth_info("RX global param:"); 833 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram); 834 ugeth_info("remoder : addr - 0x%08x, val - 0x%08x", 835 (u32) & ugeth->p_rx_glbl_pram->remoder, 836 in_be32(&ugeth->p_rx_glbl_pram->remoder)); 837 ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x", 838 (u32) & ugeth->p_rx_glbl_pram->rqptr, 839 in_be32(&ugeth->p_rx_glbl_pram->rqptr)); 840 ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x", 841 (u32) & ugeth->p_rx_glbl_pram->typeorlen, 842 in_be16(&ugeth->p_rx_glbl_pram->typeorlen)); 843 ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x", 844 (u32) & ugeth->p_rx_glbl_pram->rxgstpack, 845 ugeth->p_rx_glbl_pram->rxgstpack); 846 ugeth_info("rxrmonbaseptr : 
addr - 0x%08x, val - 0x%08x", 847 (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr, 848 in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr)); 849 ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x", 850 (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr, 851 in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr)); 852 ugeth_info("rstate : addr - 0x%08x, val - 0x%02x", 853 (u32) & ugeth->p_rx_glbl_pram->rstate, 854 ugeth->p_rx_glbl_pram->rstate); 855 ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x", 856 (u32) & ugeth->p_rx_glbl_pram->mrblr, 857 in_be16(&ugeth->p_rx_glbl_pram->mrblr)); 858 ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x", 859 (u32) & ugeth->p_rx_glbl_pram->rbdqptr, 860 in_be32(&ugeth->p_rx_glbl_pram->rbdqptr)); 861 ugeth_info("mflr : addr - 0x%08x, val - 0x%04x", 862 (u32) & ugeth->p_rx_glbl_pram->mflr, 863 in_be16(&ugeth->p_rx_glbl_pram->mflr)); 864 ugeth_info("minflr : addr - 0x%08x, val - 0x%04x", 865 (u32) & ugeth->p_rx_glbl_pram->minflr, 866 in_be16(&ugeth->p_rx_glbl_pram->minflr)); 867 ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x", 868 (u32) & ugeth->p_rx_glbl_pram->maxd1, 869 in_be16(&ugeth->p_rx_glbl_pram->maxd1)); 870 ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x", 871 (u32) & ugeth->p_rx_glbl_pram->maxd2, 872 in_be16(&ugeth->p_rx_glbl_pram->maxd2)); 873 ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x", 874 (u32) & ugeth->p_rx_glbl_pram->ecamptr, 875 in_be32(&ugeth->p_rx_glbl_pram->ecamptr)); 876 ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x", 877 (u32) & ugeth->p_rx_glbl_pram->l2qt, 878 in_be32(&ugeth->p_rx_glbl_pram->l2qt)); 879 ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x", 880 (u32) & ugeth->p_rx_glbl_pram->l3qt[0], 881 in_be32(&ugeth->p_rx_glbl_pram->l3qt[0])); 882 ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x", 883 (u32) & ugeth->p_rx_glbl_pram->l3qt[1], 884 in_be32(&ugeth->p_rx_glbl_pram->l3qt[1])); 885 ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x", 886 (u32) & ugeth->p_rx_glbl_pram->l3qt[2], 887 in_be32(&ugeth->p_rx_glbl_pram->l3qt[2])); 888 ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x", 889 (u32) & ugeth->p_rx_glbl_pram->l3qt[3], 890 in_be32(&ugeth->p_rx_glbl_pram->l3qt[3])); 891 ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x", 892 (u32) & ugeth->p_rx_glbl_pram->l3qt[4], 893 in_be32(&ugeth->p_rx_glbl_pram->l3qt[4])); 894 ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x", 895 (u32) & ugeth->p_rx_glbl_pram->l3qt[5], 896 in_be32(&ugeth->p_rx_glbl_pram->l3qt[5])); 897 ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x", 898 (u32) & ugeth->p_rx_glbl_pram->l3qt[6], 899 in_be32(&ugeth->p_rx_glbl_pram->l3qt[6])); 900 ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x", 901 (u32) & ugeth->p_rx_glbl_pram->l3qt[7], 902 in_be32(&ugeth->p_rx_glbl_pram->l3qt[7])); 903 ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x", 904 (u32) & ugeth->p_rx_glbl_pram->vlantype, 905 in_be16(&ugeth->p_rx_glbl_pram->vlantype)); 906 ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x", 907 (u32) & ugeth->p_rx_glbl_pram->vlantci, 908 in_be16(&ugeth->p_rx_glbl_pram->vlantci)); 909 for (i = 0; i < 64; i++) 910 ugeth_info 911 ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x", 912 i, 913 (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i], 914 ugeth->p_rx_glbl_pram->addressfiltering[i]); 915 ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x", 916 (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam, 917 in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam)); 918 } 919 if (ugeth->p_send_q_mem_reg) { 920 ugeth_info("Send Q memory registers:"); 921 ugeth_info("Base address: 
0x%08x", 922 (u32) ugeth->p_send_q_mem_reg); 923 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { 924 ugeth_info("SQQD[%d]:", i); 925 ugeth_info("Base address: 0x%08x", 926 (u32) & ugeth->p_send_q_mem_reg->sqqd[i]); 927 mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i], 928 sizeof(struct ucc_geth_send_queue_qd)); 929 } 930 } 931 if (ugeth->p_scheduler) { 932 ugeth_info("Scheduler:"); 933 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler); 934 mem_disp((u8 *) ugeth->p_scheduler, 935 sizeof(*ugeth->p_scheduler)); 936 } 937 if (ugeth->p_tx_fw_statistics_pram) { 938 ugeth_info("TX FW statistics pram:"); 939 ugeth_info("Base address: 0x%08x", 940 (u32) ugeth->p_tx_fw_statistics_pram); 941 mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram, 942 sizeof(*ugeth->p_tx_fw_statistics_pram)); 943 } 944 if (ugeth->p_rx_fw_statistics_pram) { 945 ugeth_info("RX FW statistics pram:"); 946 ugeth_info("Base address: 0x%08x", 947 (u32) ugeth->p_rx_fw_statistics_pram); 948 mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram, 949 sizeof(*ugeth->p_rx_fw_statistics_pram)); 950 } 951 if (ugeth->p_rx_irq_coalescing_tbl) { 952 ugeth_info("RX IRQ coalescing tables:"); 953 ugeth_info("Base address: 0x%08x", 954 (u32) ugeth->p_rx_irq_coalescing_tbl); 955 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 956 ugeth_info("RX IRQ coalescing table entry[%d]:", i); 957 ugeth_info("Base address: 0x%08x", 958 (u32) & ugeth->p_rx_irq_coalescing_tbl-> 959 coalescingentry[i]); 960 ugeth_info 961 ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x", 962 (u32) & ugeth->p_rx_irq_coalescing_tbl-> 963 coalescingentry[i].interruptcoalescingmaxvalue, 964 in_be32(&ugeth->p_rx_irq_coalescing_tbl-> 965 coalescingentry[i]. 966 interruptcoalescingmaxvalue)); 967 ugeth_info 968 ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x", 969 (u32) & ugeth->p_rx_irq_coalescing_tbl-> 970 coalescingentry[i].interruptcoalescingcounter, 971 in_be32(&ugeth->p_rx_irq_coalescing_tbl-> 972 coalescingentry[i]. 973 interruptcoalescingcounter)); 974 } 975 } 976 if (ugeth->p_rx_bd_qs_tbl) { 977 ugeth_info("RX BD QS tables:"); 978 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl); 979 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 980 ugeth_info("RX BD QS table[%d]:", i); 981 ugeth_info("Base address: 0x%08x", 982 (u32) & ugeth->p_rx_bd_qs_tbl[i]); 983 ugeth_info 984 ("bdbaseptr : addr - 0x%08x, val - 0x%08x", 985 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr, 986 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr)); 987 ugeth_info 988 ("bdptr : addr - 0x%08x, val - 0x%08x", 989 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr, 990 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr)); 991 ugeth_info 992 ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x", 993 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, 994 in_be32(&ugeth->p_rx_bd_qs_tbl[i]. 995 externalbdbaseptr)); 996 ugeth_info 997 ("externalbdptr : addr - 0x%08x, val - 0x%08x", 998 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr, 999 in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr)); 1000 ugeth_info("ucode RX Prefetched BDs:"); 1001 ugeth_info("Base address: 0x%08x", 1002 (u32) 1003 qe_muram_addr(in_be32 1004 (&ugeth->p_rx_bd_qs_tbl[i]. 1005 bdbaseptr))); 1006 mem_disp((u8 *) 1007 qe_muram_addr(in_be32 1008 (&ugeth->p_rx_bd_qs_tbl[i]. 
1009 bdbaseptr)), 1010 sizeof(struct ucc_geth_rx_prefetched_bds)); 1011 } 1012 } 1013 if (ugeth->p_init_enet_param_shadow) { 1014 int size; 1015 ugeth_info("Init enet param shadow:"); 1016 ugeth_info("Base address: 0x%08x", 1017 (u32) ugeth->p_init_enet_param_shadow); 1018 mem_disp((u8 *) ugeth->p_init_enet_param_shadow, 1019 sizeof(*ugeth->p_init_enet_param_shadow)); 1020 1021 size = sizeof(struct ucc_geth_thread_rx_pram); 1022 if (ugeth->ug_info->rxExtendedFiltering) { 1023 size += 1024 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; 1025 if (ugeth->ug_info->largestexternallookupkeysize == 1026 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) 1027 size += 1028 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; 1029 if (ugeth->ug_info->largestexternallookupkeysize == 1030 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) 1031 size += 1032 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; 1033 } 1034 1035 dump_init_enet_entries(ugeth, 1036 &(ugeth->p_init_enet_param_shadow-> 1037 txthread[0]), 1038 ENET_INIT_PARAM_MAX_ENTRIES_TX, 1039 sizeof(struct ucc_geth_thread_tx_pram), 1040 ugeth->ug_info->riscTx, 0); 1041 dump_init_enet_entries(ugeth, 1042 &(ugeth->p_init_enet_param_shadow-> 1043 rxthread[0]), 1044 ENET_INIT_PARAM_MAX_ENTRIES_RX, size, 1045 ugeth->ug_info->riscRx, 1); 1046 } 1047} 1048#endif /* DEBUG */ 1049 1050static void init_default_reg_vals(u32 __iomem *upsmr_register, 1051 u32 __iomem *maccfg1_register, 1052 u32 __iomem *maccfg2_register) 1053{ 1054 out_be32(upsmr_register, UCC_GETH_UPSMR_INIT); 1055 out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT); 1056 out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT); 1057} 1058 1059static int init_half_duplex_params(int alt_beb, 1060 int back_pressure_no_backoff, 1061 int no_backoff, 1062 int excess_defer, 1063 u8 alt_beb_truncation, 1064 u8 max_retransmissions, 1065 u8 collision_window, 1066 u32 __iomem *hafdup_register) 1067{ 1068 u32 value = 0; 1069 1070 if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) || 1071 (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) || 1072 (collision_window > HALFDUP_COLLISION_WINDOW_MAX)) 1073 return -EINVAL; 1074 1075 value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT); 1076 1077 if (alt_beb) 1078 value |= HALFDUP_ALT_BEB; 1079 if (back_pressure_no_backoff) 1080 value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF; 1081 if (no_backoff) 1082 value |= HALFDUP_NO_BACKOFF; 1083 if (excess_defer) 1084 value |= HALFDUP_EXCESSIVE_DEFER; 1085 1086 value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT); 1087 1088 value |= collision_window; 1089 1090 out_be32(hafdup_register, value); 1091 return 0; 1092} 1093 1094static int init_inter_frame_gap_params(u8 non_btb_cs_ipg, 1095 u8 non_btb_ipg, 1096 u8 min_ifg, 1097 u8 btb_ipg, 1098 u32 __iomem *ipgifg_register) 1099{ 1100 u32 value = 0; 1101 1102 /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back 1103 IPG part 2 */ 1104 if (non_btb_cs_ipg > non_btb_ipg) 1105 return -EINVAL; 1106 1107 if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) || 1108 (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) || 1109 /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */ 1110 (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX)) 1111 return -EINVAL; 1112 1113 value |= 1114 ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) & 1115 IPGIFG_NBTB_CS_IPG_MASK); 1116 value |= 1117 ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) & 1118 IPGIFG_NBTB_IPG_MASK); 1119 value |= 1120 ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) & 1121 
IPGIFG_MIN_IFG_MASK); 1122 value |= (btb_ipg & IPGIFG_BTB_IPG_MASK); 1123 1124 out_be32(ipgifg_register, value); 1125 return 0; 1126} 1127 1128int init_flow_control_params(u32 automatic_flow_control_mode, 1129 int rx_flow_control_enable, 1130 int tx_flow_control_enable, 1131 u16 pause_period, 1132 u16 extension_field, 1133 u32 __iomem *upsmr_register, 1134 u32 __iomem *uempr_register, 1135 u32 __iomem *maccfg1_register) 1136{ 1137 u32 value = 0; 1138 1139 /* Set UEMPR register */ 1140 value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT; 1141 value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT; 1142 out_be32(uempr_register, value); 1143 1144 /* Set UPSMR register */ 1145 setbits32(upsmr_register, automatic_flow_control_mode); 1146 1147 value = in_be32(maccfg1_register); 1148 if (rx_flow_control_enable) 1149 value |= MACCFG1_FLOW_RX; 1150 if (tx_flow_control_enable) 1151 value |= MACCFG1_FLOW_TX; 1152 out_be32(maccfg1_register, value); 1153 1154 return 0; 1155} 1156 1157static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, 1158 int auto_zero_hardware_statistics, 1159 u32 __iomem *upsmr_register, 1160 u16 __iomem *uescr_register) 1161{ 1162 u16 uescr_value = 0; 1163 1164 /* Enable hardware statistics gathering if requested */ 1165 if (enable_hardware_statistics) 1166 setbits32(upsmr_register, UCC_GETH_UPSMR_HSE); 1167 1168 /* Clear hardware statistics counters */ 1169 uescr_value = in_be16(uescr_register); 1170 uescr_value |= UESCR_CLRCNT; 1171 /* Automatically zero hardware statistics counters on read, 1172 if requested */ 1173 if (auto_zero_hardware_statistics) 1174 uescr_value |= UESCR_AUTOZ; 1175 out_be16(uescr_register, uescr_value); 1176 1177 return 0; 1178} 1179 1180static int init_firmware_statistics_gathering_mode(int 1181 enable_tx_firmware_statistics, 1182 int enable_rx_firmware_statistics, 1183 u32 __iomem *tx_rmon_base_ptr, 1184 u32 tx_firmware_statistics_structure_address, 1185 u32 __iomem *rx_rmon_base_ptr, 1186 u32 rx_firmware_statistics_structure_address, 1187 u16 __iomem *temoder_register, 1188 u32 __iomem *remoder_register) 1189{ 1190 /* Note: this function does not check if */ 1191 /* the parameters it receives are NULL */ 1192 1193 if (enable_tx_firmware_statistics) { 1194 out_be32(tx_rmon_base_ptr, 1195 tx_firmware_statistics_structure_address); 1196 setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE); 1197 } 1198 1199 if (enable_rx_firmware_statistics) { 1200 out_be32(rx_rmon_base_ptr, 1201 rx_firmware_statistics_structure_address); 1202 setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE); 1203 } 1204 1205 return 0; 1206} 1207 1208static int init_mac_station_addr_regs(u8 address_byte_0, 1209 u8 address_byte_1, 1210 u8 address_byte_2, 1211 u8 address_byte_3, 1212 u8 address_byte_4, 1213 u8 address_byte_5, 1214 u32 __iomem *macstnaddr1_register, 1215 u32 __iomem *macstnaddr2_register) 1216{ 1217 u32 value = 0; 1218 1219 /* Example: for a station address of 0x12345678ABCD, */ 1220 /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */ 1221 1222 /* MACSTNADDR1 Register: */ 1223 1224 /* 0 7 8 15 */ 1225 /* station address byte 5 station address byte 4 */ 1226 /* 16 23 24 31 */ 1227 /* station address byte 3 station address byte 2 */ 1228 value |= (u32) ((address_byte_2 << 0) & 0x000000FF); 1229 value |= (u32) ((address_byte_3 << 8) & 0x0000FF00); 1230 value |= (u32) ((address_byte_4 << 16) & 0x00FF0000); 1231 value |= (u32) ((address_byte_5 << 24) & 0xFF000000); 1232 1233 
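	/* Worked example (added), using the 0x12345678ABCD station address
	 * from the comment above (0x12 is byte 0, ..., 0xCD is byte 5): the
	 * ORs above yield value = 0xCDAB7856 for MACSTNADDR1 (bytes 5..2),
	 * and the second register computed below becomes 0x34120000, carrying
	 * bytes 1..0 in its upper half. This is the same byte-reversed layout
	 * that set_mac_addr() writes into the address-filter parameter RAM. */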
out_be32(macstnaddr1_register, value); 1234 1235 /* MACSTNADDR2 Register: */ 1236 1237 /* 0 7 8 15 */ 1238 /* station address byte 1 station address byte 0 */ 1239 /* 16 23 24 31 */ 1240 /* reserved reserved */ 1241 value = 0; 1242 value |= (u32) ((address_byte_0 << 16) & 0x00FF0000); 1243 value |= (u32) ((address_byte_1 << 24) & 0xFF000000); 1244 1245 out_be32(macstnaddr2_register, value); 1246 1247 return 0; 1248} 1249 1250static int init_check_frame_length_mode(int length_check, 1251 u32 __iomem *maccfg2_register) 1252{ 1253 u32 value = 0; 1254 1255 value = in_be32(maccfg2_register); 1256 1257 if (length_check) 1258 value |= MACCFG2_LC; 1259 else 1260 value &= ~MACCFG2_LC; 1261 1262 out_be32(maccfg2_register, value); 1263 return 0; 1264} 1265 1266static int init_preamble_length(u8 preamble_length, 1267 u32 __iomem *maccfg2_register) 1268{ 1269 if ((preamble_length < 3) || (preamble_length > 7)) 1270 return -EINVAL; 1271 1272 clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK, 1273 preamble_length << MACCFG2_PREL_SHIFT); 1274 1275 return 0; 1276} 1277 1278static int init_rx_parameters(int reject_broadcast, 1279 int receive_short_frames, 1280 int promiscuous, u32 __iomem *upsmr_register) 1281{ 1282 u32 value = 0; 1283 1284 value = in_be32(upsmr_register); 1285 1286 if (reject_broadcast) 1287 value |= UCC_GETH_UPSMR_BRO; 1288 else 1289 value &= ~UCC_GETH_UPSMR_BRO; 1290 1291 if (receive_short_frames) 1292 value |= UCC_GETH_UPSMR_RSH; 1293 else 1294 value &= ~UCC_GETH_UPSMR_RSH; 1295 1296 if (promiscuous) 1297 value |= UCC_GETH_UPSMR_PRO; 1298 else 1299 value &= ~UCC_GETH_UPSMR_PRO; 1300 1301 out_be32(upsmr_register, value); 1302 1303 return 0; 1304} 1305 1306static int init_max_rx_buff_len(u16 max_rx_buf_len, 1307 u16 __iomem *mrblr_register) 1308{ 1309 /* max_rx_buf_len value must be a multiple of 128 */ 1310 if ((max_rx_buf_len == 0) || 1311 (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT)) 1312 return -EINVAL; 1313 1314 out_be16(mrblr_register, max_rx_buf_len); 1315 return 0; 1316} 1317 1318static int init_min_frame_len(u16 min_frame_length, 1319 u16 __iomem *minflr_register, 1320 u16 __iomem *mrblr_register) 1321{ 1322 u16 mrblr_value = 0; 1323 1324 mrblr_value = in_be16(mrblr_register); 1325 if (min_frame_length >= (mrblr_value - 4)) 1326 return -EINVAL; 1327 1328 out_be16(minflr_register, min_frame_length); 1329 return 0; 1330} 1331 1332static int adjust_enet_interface(struct ucc_geth_private *ugeth) 1333{ 1334 struct ucc_geth_info *ug_info; 1335 struct ucc_geth __iomem *ug_regs; 1336 struct ucc_fast __iomem *uf_regs; 1337 int ret_val; 1338 u32 upsmr, maccfg2; 1339 u16 value; 1340 1341 ugeth_vdbg("%s: IN", __func__); 1342 1343 ug_info = ugeth->ug_info; 1344 ug_regs = ugeth->ug_regs; 1345 uf_regs = ugeth->uccf->uf_regs; 1346 1347 /* Set MACCFG2 */ 1348 maccfg2 = in_be32(&ug_regs->maccfg2); 1349 maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; 1350 if ((ugeth->max_speed == SPEED_10) || 1351 (ugeth->max_speed == SPEED_100)) 1352 maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; 1353 else if (ugeth->max_speed == SPEED_1000) 1354 maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; 1355 maccfg2 |= ug_info->padAndCrc; 1356 out_be32(&ug_regs->maccfg2, maccfg2); 1357 1358 /* Set UPSMR */ 1359 upsmr = in_be32(&uf_regs->upsmr); 1360 upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M | 1361 UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM); 1362 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || 1363 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || 1364 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || 1365 
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || 1366 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || 1367 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1368 if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII) 1369 upsmr |= UCC_GETH_UPSMR_RPM; 1370 switch (ugeth->max_speed) { 1371 case SPEED_10: 1372 upsmr |= UCC_GETH_UPSMR_R10M; 1373 /* FALLTHROUGH */ 1374 case SPEED_100: 1375 if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI) 1376 upsmr |= UCC_GETH_UPSMR_RMM; 1377 } 1378 } 1379 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || 1380 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1381 upsmr |= UCC_GETH_UPSMR_TBIM; 1382 } 1383 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)) 1384 upsmr |= UCC_GETH_UPSMR_SGMM; 1385 1386 out_be32(&uf_regs->upsmr, upsmr); 1387 1388 /* Disable autonegotiation in tbi mode, because by default it 1389 comes up in autonegotiation mode. */ 1390 /* Note that this depends on proper setting in utbipar register. */ 1391 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || 1392 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1393 struct ucc_geth_info *ug_info = ugeth->ug_info; 1394 struct phy_device *tbiphy; 1395 1396 if (!ug_info->tbi_node) 1397 ugeth_warn("TBI mode requires that the device " 1398 "tree specify a tbi-handle\n"); 1399 1400 tbiphy = of_phy_find_device(ug_info->tbi_node); 1401 if (!tbiphy) 1402 ugeth_warn("Could not get TBI device\n"); 1403 1404 value = phy_read(tbiphy, ENET_TBI_MII_CR); 1405 value &= ~0x1000; /* Turn off autonegotiation */ 1406 phy_write(tbiphy, ENET_TBI_MII_CR, value); 1407 } 1408 1409 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); 1410 1411 ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); 1412 if (ret_val != 0) { 1413 if (netif_msg_probe(ugeth)) 1414 ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.", 1415 __func__); 1416 return ret_val; 1417 } 1418 1419 return 0; 1420} 1421 1422static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) 1423{ 1424 struct ucc_fast_private *uccf; 1425 u32 cecr_subblock; 1426 u32 temp; 1427 int i = 10; 1428 1429 uccf = ugeth->uccf; 1430 1431 /* Mask GRACEFUL STOP TX interrupt bit and clear it */ 1432 clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA); 1433 out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA); /* clear by writing 1 */ 1434 1435 /* Issue host command */ 1436 cecr_subblock = 1437 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 1438 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, 1439 QE_CR_PROTOCOL_ETHERNET, 0); 1440 1441 /* Wait for command to complete */ 1442 do { 1443 msleep(10); 1444 temp = in_be32(uccf->p_ucce); 1445 } while (!(temp & UCC_GETH_UCCE_GRA) && --i); 1446 1447 uccf->stopped_tx = 1; 1448 1449 return 0; 1450} 1451 1452static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth) 1453{ 1454 struct ucc_fast_private *uccf; 1455 u32 cecr_subblock; 1456 u8 temp; 1457 int i = 10; 1458 1459 uccf = ugeth->uccf; 1460 1461 /* Clear acknowledge bit */ 1462 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); 1463 temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; 1464 out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp); 1465 1466 /* Keep issuing command and checking acknowledge bit until 1467 it is asserted, according to spec */ 1468 do { 1469 /* Issue host command */ 1470 cecr_subblock = 1471 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info. 
1472 ucc_num); 1473 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, 1474 QE_CR_PROTOCOL_ETHERNET, 0); 1475 msleep(10); 1476 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); 1477 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i); 1478 1479 uccf->stopped_rx = 1; 1480 1481 return 0; 1482} 1483 1484static int ugeth_restart_tx(struct ucc_geth_private *ugeth) 1485{ 1486 struct ucc_fast_private *uccf; 1487 u32 cecr_subblock; 1488 1489 uccf = ugeth->uccf; 1490 1491 cecr_subblock = 1492 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 1493 qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0); 1494 uccf->stopped_tx = 0; 1495 1496 return 0; 1497} 1498 1499static int ugeth_restart_rx(struct ucc_geth_private *ugeth) 1500{ 1501 struct ucc_fast_private *uccf; 1502 u32 cecr_subblock; 1503 1504 uccf = ugeth->uccf; 1505 1506 cecr_subblock = 1507 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 1508 qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 1509 0); 1510 uccf->stopped_rx = 0; 1511 1512 return 0; 1513} 1514 1515static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode) 1516{ 1517 struct ucc_fast_private *uccf; 1518 int enabled_tx, enabled_rx; 1519 1520 uccf = ugeth->uccf; 1521 1522 /* check if the UCC number is in range. */ 1523 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1524 if (netif_msg_probe(ugeth)) 1525 ugeth_err("%s: ucc_num out of range.", __func__); 1526 return -EINVAL; 1527 } 1528 1529 enabled_tx = uccf->enabled_tx; 1530 enabled_rx = uccf->enabled_rx; 1531 1532 /* Get Tx and Rx going again, in case this channel was actively 1533 disabled. */ 1534 if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx) 1535 ugeth_restart_tx(ugeth); 1536 if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx) 1537 ugeth_restart_rx(ugeth); 1538 1539 ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */ 1540 1541 return 0; 1542 1543} 1544 1545static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode) 1546{ 1547 struct ucc_fast_private *uccf; 1548 1549 uccf = ugeth->uccf; 1550 1551 /* check if the UCC number is in range. */ 1552 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1553 if (netif_msg_probe(ugeth)) 1554 ugeth_err("%s: ucc_num out of range.", __func__); 1555 return -EINVAL; 1556 } 1557 1558 /* Stop any transmissions */ 1559 if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx) 1560 ugeth_graceful_stop_tx(ugeth); 1561 1562 /* Stop any receptions */ 1563 if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx) 1564 ugeth_graceful_stop_rx(ugeth); 1565 1566 ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */ 1567 1568 return 0; 1569} 1570 1571static void ugeth_quiesce(struct ucc_geth_private *ugeth) 1572{ 1573 /* Prevent any further xmits, plus detach the device. */ 1574 netif_device_detach(ugeth->ndev); 1575 1576 /* Wait for any current xmits to finish. */ 1577 netif_tx_disable(ugeth->ndev); 1578 1579 /* Disable the interrupt to avoid NAPI rescheduling. */ 1580 disable_irq(ugeth->ug_info->uf_info.irq); 1581 1582 /* Stop NAPI, and possibly wait for its completion. */ 1583 napi_disable(&ugeth->napi); 1584} 1585 1586static void ugeth_activate(struct ucc_geth_private *ugeth) 1587{ 1588 napi_enable(&ugeth->napi); 1589 enable_irq(ugeth->ug_info->uf_info.irq); 1590 netif_device_attach(ugeth->ndev); 1591} 1592 1593/* Called every time the controller might need to be made 1594 * aware of new link state. 
The PHY code conveys this 1595 * information through variables in the ugeth structure, and this 1596 * function converts those variables into the appropriate 1597 * register values, and can bring down the device if needed. 1598 */ 1599 1600static void adjust_link(struct net_device *dev) 1601{ 1602 struct ucc_geth_private *ugeth = netdev_priv(dev); 1603 struct ucc_geth __iomem *ug_regs; 1604 struct ucc_fast __iomem *uf_regs; 1605 struct phy_device *phydev = ugeth->phydev; 1606 int new_state = 0; 1607 1608 ug_regs = ugeth->ug_regs; 1609 uf_regs = ugeth->uccf->uf_regs; 1610 1611 if (phydev->link) { 1612 u32 tempval = in_be32(&ug_regs->maccfg2); 1613 u32 upsmr = in_be32(&uf_regs->upsmr); 1614 /* Now we make sure that we can be in full duplex mode. 1615 * If not, we operate in half-duplex mode. */ 1616 if (phydev->duplex != ugeth->oldduplex) { 1617 new_state = 1; 1618 if (!(phydev->duplex)) 1619 tempval &= ~(MACCFG2_FDX); 1620 else 1621 tempval |= MACCFG2_FDX; 1622 ugeth->oldduplex = phydev->duplex; 1623 } 1624 1625 if (phydev->speed != ugeth->oldspeed) { 1626 new_state = 1; 1627 switch (phydev->speed) { 1628 case SPEED_1000: 1629 tempval = ((tempval & 1630 ~(MACCFG2_INTERFACE_MODE_MASK)) | 1631 MACCFG2_INTERFACE_MODE_BYTE); 1632 break; 1633 case SPEED_100: 1634 case SPEED_10: 1635 tempval = ((tempval & 1636 ~(MACCFG2_INTERFACE_MODE_MASK)) | 1637 MACCFG2_INTERFACE_MODE_NIBBLE); 1638 /* if reduced mode, re-set UPSMR.R10M */ 1639 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || 1640 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || 1641 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || 1642 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || 1643 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || 1644 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1645 if (phydev->speed == SPEED_10) 1646 upsmr |= UCC_GETH_UPSMR_R10M; 1647 else 1648 upsmr &= ~UCC_GETH_UPSMR_R10M; 1649 } 1650 break; 1651 default: 1652 if (netif_msg_link(ugeth)) 1653 ugeth_warn( 1654 "%s: Ack! Speed (%d) is not 10/100/1000!", 1655 dev->name, phydev->speed); 1656 break; 1657 } 1658 ugeth->oldspeed = phydev->speed; 1659 } 1660 1661 if (!ugeth->oldlink) { 1662 new_state = 1; 1663 ugeth->oldlink = 1; 1664 } 1665 1666 if (new_state) { 1667 /* 1668 * To change the MAC configuration we need to disable 1669 * the controller. To do so, we have to either grab 1670 * ugeth->lock, which is a bad idea since 'graceful 1671 * stop' commands might take quite a while, or we can 1672 * quiesce driver's activity. 1673 */ 1674 ugeth_quiesce(ugeth); 1675 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); 1676 1677 out_be32(&ug_regs->maccfg2, tempval); 1678 out_be32(&uf_regs->upsmr, upsmr); 1679 1680 ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); 1681 ugeth_activate(ugeth); 1682 } 1683 } else if (ugeth->oldlink) { 1684 new_state = 1; 1685 ugeth->oldlink = 0; 1686 ugeth->oldspeed = 0; 1687 ugeth->oldduplex = -1; 1688 } 1689 1690 if (new_state && netif_msg_link(ugeth)) 1691 phy_print_status(phydev); 1692} 1693 1694/* Initialize TBI PHY interface for communicating with the 1695 * SERDES lynx PHY on the chip. We communicate with this PHY 1696 * through the MDIO bus on each controller, treating it as a 1697 * "normal" PHY at the address found in the UTBIPA register. We assume 1698 * that the UTBIPA register is valid. Either the MDIO bus code will set 1699 * it to a value that doesn't conflict with other PHYs on the bus, or the 1700 * value doesn't matter, as there are no other PHYs on the bus. 
1701 */ 1702static void uec_configure_serdes(struct net_device *dev) 1703{ 1704 struct ucc_geth_private *ugeth = netdev_priv(dev); 1705 struct ucc_geth_info *ug_info = ugeth->ug_info; 1706 struct phy_device *tbiphy; 1707 1708 if (!ug_info->tbi_node) { 1709 dev_warn(&dev->dev, "SGMII mode requires that the device " 1710 "tree specify a tbi-handle\n"); 1711 return; 1712 } 1713 1714 tbiphy = of_phy_find_device(ug_info->tbi_node); 1715 if (!tbiphy) { 1716 dev_err(&dev->dev, "error: Could not get TBI device\n"); 1717 return; 1718 } 1719 1720 /* 1721 * If the link is already up, we must already be ok, and don't need to 1722 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured 1723 * everything for us? Resetting it takes the link down and requires 1724 * several seconds for it to come back. 1725 */ 1726 if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) 1727 return; 1728 1729 /* Single clk mode, mii mode off(for serdes communication) */ 1730 phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS); 1731 1732 phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT); 1733 1734 phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); 1735} 1736 1737/* Configure the PHY for dev. 1738 * returns 0 if success. -1 if failure 1739 */ 1740static int init_phy(struct net_device *dev) 1741{ 1742 struct ucc_geth_private *priv = netdev_priv(dev); 1743 struct ucc_geth_info *ug_info = priv->ug_info; 1744 struct phy_device *phydev; 1745 1746 priv->oldlink = 0; 1747 priv->oldspeed = 0; 1748 priv->oldduplex = -1; 1749 1750 phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, 1751 priv->phy_interface); 1752 if (!phydev) 1753 phydev = of_phy_connect_fixed_link(dev, &adjust_link, 1754 priv->phy_interface); 1755 if (!phydev) { 1756 dev_err(&dev->dev, "Could not attach to PHY\n"); 1757 return -ENODEV; 1758 } 1759 1760 if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) 1761 uec_configure_serdes(dev); 1762 1763 phydev->supported &= (ADVERTISED_10baseT_Half | 1764 ADVERTISED_10baseT_Full | 1765 ADVERTISED_100baseT_Half | 1766 ADVERTISED_100baseT_Full); 1767 1768 if (priv->max_speed == SPEED_1000) 1769 phydev->supported |= ADVERTISED_1000baseT_Full; 1770 1771 phydev->advertising = phydev->supported; 1772 1773 priv->phydev = phydev; 1774 1775 return 0; 1776} 1777 1778static void ugeth_dump_regs(struct ucc_geth_private *ugeth) 1779{ 1780#ifdef DEBUG 1781 ucc_fast_dump_regs(ugeth->uccf); 1782 dump_regs(ugeth); 1783 dump_bds(ugeth); 1784#endif 1785} 1786 1787static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private * 1788 ugeth, 1789 enum enet_addr_type 1790 enet_addr_type) 1791{ 1792 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 1793 struct ucc_fast_private *uccf; 1794 enum comm_dir comm_dir; 1795 struct list_head *p_lh; 1796 u16 i, num; 1797 u32 __iomem *addr_h; 1798 u32 __iomem *addr_l; 1799 u8 *p_counter; 1800 1801 uccf = ugeth->uccf; 1802 1803 p_82xx_addr_filt = 1804 (struct ucc_geth_82xx_address_filtering_pram __iomem *) 1805 ugeth->p_rx_glbl_pram->addressfiltering; 1806 1807 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { 1808 addr_h = &(p_82xx_addr_filt->gaddr_h); 1809 addr_l = &(p_82xx_addr_filt->gaddr_l); 1810 p_lh = &ugeth->group_hash_q; 1811 p_counter = &(ugeth->numGroupAddrInHash); 1812 } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) { 1813 addr_h = &(p_82xx_addr_filt->iaddr_h); 1814 addr_l = &(p_82xx_addr_filt->iaddr_l); 1815 p_lh = &ugeth->ind_hash_q; 1816 p_counter = &(ugeth->numIndAddrInHash); 1817 } else 1818 return -EINVAL; 1819 1820 
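	/* Descriptive note (added): pause whichever directions are currently
	 * running while the hash registers are cleared; comm_dir records what
	 * was enabled so it can be re-enabled once the hash table and the
	 * shadow address list have been emptied. */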
comm_dir = 0; 1821 if (uccf->enabled_tx) 1822 comm_dir |= COMM_DIR_TX; 1823 if (uccf->enabled_rx) 1824 comm_dir |= COMM_DIR_RX; 1825 if (comm_dir) 1826 ugeth_disable(ugeth, comm_dir); 1827 1828 /* Clear the hash table. */ 1829 out_be32(addr_h, 0x00000000); 1830 out_be32(addr_l, 0x00000000); 1831 1832 if (!p_lh) 1833 return 0; 1834 1835 num = *p_counter; 1836 1837 /* Delete all remaining CQ elements */ 1838 for (i = 0; i < num; i++) 1839 put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh))); 1840 1841 *p_counter = 0; 1842 1843 if (comm_dir) 1844 ugeth_enable(ugeth, comm_dir); 1845 1846 return 0; 1847} 1848 1849static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth, 1850 u8 paddr_num) 1851{ 1852 ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */ 1853 return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */ 1854} 1855 1856static void ucc_geth_memclean(struct ucc_geth_private *ugeth) 1857{ 1858 u16 i, j; 1859 u8 __iomem *bd; 1860 1861 if (!ugeth) 1862 return; 1863 1864 if (ugeth->uccf) { 1865 ucc_fast_free(ugeth->uccf); 1866 ugeth->uccf = NULL; 1867 } 1868 1869 if (ugeth->p_thread_data_tx) { 1870 qe_muram_free(ugeth->thread_dat_tx_offset); 1871 ugeth->p_thread_data_tx = NULL; 1872 } 1873 if (ugeth->p_thread_data_rx) { 1874 qe_muram_free(ugeth->thread_dat_rx_offset); 1875 ugeth->p_thread_data_rx = NULL; 1876 } 1877 if (ugeth->p_exf_glbl_param) { 1878 qe_muram_free(ugeth->exf_glbl_param_offset); 1879 ugeth->p_exf_glbl_param = NULL; 1880 } 1881 if (ugeth->p_rx_glbl_pram) { 1882 qe_muram_free(ugeth->rx_glbl_pram_offset); 1883 ugeth->p_rx_glbl_pram = NULL; 1884 } 1885 if (ugeth->p_tx_glbl_pram) { 1886 qe_muram_free(ugeth->tx_glbl_pram_offset); 1887 ugeth->p_tx_glbl_pram = NULL; 1888 } 1889 if (ugeth->p_send_q_mem_reg) { 1890 qe_muram_free(ugeth->send_q_mem_reg_offset); 1891 ugeth->p_send_q_mem_reg = NULL; 1892 } 1893 if (ugeth->p_scheduler) { 1894 qe_muram_free(ugeth->scheduler_offset); 1895 ugeth->p_scheduler = NULL; 1896 } 1897 if (ugeth->p_tx_fw_statistics_pram) { 1898 qe_muram_free(ugeth->tx_fw_statistics_pram_offset); 1899 ugeth->p_tx_fw_statistics_pram = NULL; 1900 } 1901 if (ugeth->p_rx_fw_statistics_pram) { 1902 qe_muram_free(ugeth->rx_fw_statistics_pram_offset); 1903 ugeth->p_rx_fw_statistics_pram = NULL; 1904 } 1905 if (ugeth->p_rx_irq_coalescing_tbl) { 1906 qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset); 1907 ugeth->p_rx_irq_coalescing_tbl = NULL; 1908 } 1909 if (ugeth->p_rx_bd_qs_tbl) { 1910 qe_muram_free(ugeth->rx_bd_qs_tbl_offset); 1911 ugeth->p_rx_bd_qs_tbl = NULL; 1912 } 1913 if (ugeth->p_init_enet_param_shadow) { 1914 return_init_enet_entries(ugeth, 1915 &(ugeth->p_init_enet_param_shadow-> 1916 rxthread[0]), 1917 ENET_INIT_PARAM_MAX_ENTRIES_RX, 1918 ugeth->ug_info->riscRx, 1); 1919 return_init_enet_entries(ugeth, 1920 &(ugeth->p_init_enet_param_shadow-> 1921 txthread[0]), 1922 ENET_INIT_PARAM_MAX_ENTRIES_TX, 1923 ugeth->ug_info->riscTx, 0); 1924 kfree(ugeth->p_init_enet_param_shadow); 1925 ugeth->p_init_enet_param_shadow = NULL; 1926 } 1927 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { 1928 bd = ugeth->p_tx_bd_ring[i]; 1929 if (!bd) 1930 continue; 1931 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { 1932 if (ugeth->tx_skbuff[i][j]) { 1933 dma_unmap_single(ugeth->dev, 1934 in_be32(&((struct qe_bd __iomem *)bd)->buf), 1935 (in_be32((u32 __iomem *)bd) & 1936 BD_LENGTH_MASK), 1937 DMA_TO_DEVICE); 1938 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); 1939 ugeth->tx_skbuff[i][j] = NULL; 1940 } 1941 } 1942 
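		/*
		 * All pending skbs for this Tx queue are now unmapped and
		 * freed; next release the skb pointer array itself, then the
		 * BD ring storage - kfree() when the ring was carved out of
		 * system memory, qe_muram_free() when it lives in MURAM.
		 */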
1943 kfree(ugeth->tx_skbuff[i]); 1944 1945 if (ugeth->p_tx_bd_ring[i]) { 1946 if (ugeth->ug_info->uf_info.bd_mem_part == 1947 MEM_PART_SYSTEM) 1948 kfree((void *)ugeth->tx_bd_ring_offset[i]); 1949 else if (ugeth->ug_info->uf_info.bd_mem_part == 1950 MEM_PART_MURAM) 1951 qe_muram_free(ugeth->tx_bd_ring_offset[i]); 1952 ugeth->p_tx_bd_ring[i] = NULL; 1953 } 1954 } 1955 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 1956 if (ugeth->p_rx_bd_ring[i]) { 1957 /* Return existing data buffers in ring */ 1958 bd = ugeth->p_rx_bd_ring[i]; 1959 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { 1960 if (ugeth->rx_skbuff[i][j]) { 1961 dma_unmap_single(ugeth->dev, 1962 in_be32(&((struct qe_bd __iomem *)bd)->buf), 1963 ugeth->ug_info-> 1964 uf_info.max_rx_buf_length + 1965 UCC_GETH_RX_DATA_BUF_ALIGNMENT, 1966 DMA_FROM_DEVICE); 1967 dev_kfree_skb_any( 1968 ugeth->rx_skbuff[i][j]); 1969 ugeth->rx_skbuff[i][j] = NULL; 1970 } 1971 bd += sizeof(struct qe_bd); 1972 } 1973 1974 kfree(ugeth->rx_skbuff[i]); 1975 1976 if (ugeth->ug_info->uf_info.bd_mem_part == 1977 MEM_PART_SYSTEM) 1978 kfree((void *)ugeth->rx_bd_ring_offset[i]); 1979 else if (ugeth->ug_info->uf_info.bd_mem_part == 1980 MEM_PART_MURAM) 1981 qe_muram_free(ugeth->rx_bd_ring_offset[i]); 1982 ugeth->p_rx_bd_ring[i] = NULL; 1983 } 1984 } 1985 while (!list_empty(&ugeth->group_hash_q)) 1986 put_enet_addr_container(ENET_ADDR_CONT_ENTRY 1987 (dequeue(&ugeth->group_hash_q))); 1988 while (!list_empty(&ugeth->ind_hash_q)) 1989 put_enet_addr_container(ENET_ADDR_CONT_ENTRY 1990 (dequeue(&ugeth->ind_hash_q))); 1991 if (ugeth->ug_regs) { 1992 iounmap(ugeth->ug_regs); 1993 ugeth->ug_regs = NULL; 1994 } 1995 1996 skb_queue_purge(&ugeth->rx_recycle); 1997} 1998 1999static void ucc_geth_set_multi(struct net_device *dev) 2000{ 2001 struct ucc_geth_private *ugeth; 2002 struct netdev_hw_addr *ha; 2003 struct ucc_fast __iomem *uf_regs; 2004 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 2005 2006 ugeth = netdev_priv(dev); 2007 2008 uf_regs = ugeth->uccf->uf_regs; 2009 2010 if (dev->flags & IFF_PROMISC) { 2011 setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); 2012 } else { 2013 clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); 2014 2015 p_82xx_addr_filt = 2016 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> 2017 p_rx_glbl_pram->addressfiltering; 2018 2019 if (dev->flags & IFF_ALLMULTI) { 2020 /* Catch all multicast addresses, so set the 2021 * filter to all 1's. 2022 */ 2023 out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff); 2024 out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff); 2025 } else { 2026 /* Clear filter and add the addresses in the list. 2027 */ 2028 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); 2029 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); 2030 2031 netdev_for_each_mc_addr(ha, dev) { 2032 /* Only support group multicast for now. 2033 */ 2034 if (!(ha->addr[0] & 1)) 2035 continue; 2036 2037 /* Ask CPM to run CRC and set bit in 2038 * filter mask. 
2039 */ 2040 hw_add_addr_in_hash(ugeth, ha->addr); 2041 } 2042 } 2043 } 2044} 2045 2046static void ucc_geth_stop(struct ucc_geth_private *ugeth) 2047{ 2048 struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; 2049 struct phy_device *phydev = ugeth->phydev; 2050 2051 ugeth_vdbg("%s: IN", __func__); 2052 2053 /* Disable the controller */ 2054 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); 2055 2056 /* Tell the kernel the link is down */ 2057 phy_stop(phydev); 2058 2059 /* Mask all interrupts */ 2060 out_be32(ugeth->uccf->p_uccm, 0x00000000); 2061 2062 /* Clear all interrupts */ 2063 out_be32(ugeth->uccf->p_ucce, 0xffffffff); 2064 2065 /* Disable Rx and Tx */ 2066 clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); 2067 2068 phy_disconnect(ugeth->phydev); 2069 ugeth->phydev = NULL; 2070 2071 ucc_geth_memclean(ugeth); 2072} 2073 2074static int ucc_struct_init(struct ucc_geth_private *ugeth) 2075{ 2076 struct ucc_geth_info *ug_info; 2077 struct ucc_fast_info *uf_info; 2078 int i; 2079 2080 ug_info = ugeth->ug_info; 2081 uf_info = &ug_info->uf_info; 2082 2083 if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || 2084 (uf_info->bd_mem_part == MEM_PART_MURAM))) { 2085 if (netif_msg_probe(ugeth)) 2086 ugeth_err("%s: Bad memory partition value.", 2087 __func__); 2088 return -EINVAL; 2089 } 2090 2091 /* Rx BD lengths */ 2092 for (i = 0; i < ug_info->numQueuesRx; i++) { 2093 if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) || 2094 (ug_info->bdRingLenRx[i] % 2095 UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) { 2096 if (netif_msg_probe(ugeth)) 2097 ugeth_err 2098 ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.", 2099 __func__); 2100 return -EINVAL; 2101 } 2102 } 2103 2104 /* Tx BD lengths */ 2105 for (i = 0; i < ug_info->numQueuesTx; i++) { 2106 if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) { 2107 if (netif_msg_probe(ugeth)) 2108 ugeth_err 2109 ("%s: Tx BD ring length must be no smaller than 2.", 2110 __func__); 2111 return -EINVAL; 2112 } 2113 } 2114 2115 /* mrblr */ 2116 if ((uf_info->max_rx_buf_length == 0) || 2117 (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) { 2118 if (netif_msg_probe(ugeth)) 2119 ugeth_err 2120 ("%s: max_rx_buf_length must be non-zero multiple of 128.", 2121 __func__); 2122 return -EINVAL; 2123 } 2124 2125 /* num Tx queues */ 2126 if (ug_info->numQueuesTx > NUM_TX_QUEUES) { 2127 if (netif_msg_probe(ugeth)) 2128 ugeth_err("%s: number of tx queues too large.", __func__); 2129 return -EINVAL; 2130 } 2131 2132 /* num Rx queues */ 2133 if (ug_info->numQueuesRx > NUM_RX_QUEUES) { 2134 if (netif_msg_probe(ugeth)) 2135 ugeth_err("%s: number of rx queues too large.", __func__); 2136 return -EINVAL; 2137 } 2138 2139 /* l2qt */ 2140 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) { 2141 if (ug_info->l2qt[i] >= ug_info->numQueuesRx) { 2142 if (netif_msg_probe(ugeth)) 2143 ugeth_err 2144 ("%s: VLAN priority table entry must not be" 2145 " larger than number of Rx queues.", 2146 __func__); 2147 return -EINVAL; 2148 } 2149 } 2150 2151 /* l3qt */ 2152 for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) { 2153 if (ug_info->l3qt[i] >= ug_info->numQueuesRx) { 2154 if (netif_msg_probe(ugeth)) 2155 ugeth_err 2156 ("%s: IP priority table entry must not be" 2157 " larger than number of Rx queues.", 2158 __func__); 2159 return -EINVAL; 2160 } 2161 } 2162 2163 if (ug_info->cam && !ug_info->ecamptr) { 2164 if (netif_msg_probe(ugeth)) 2165 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.", 2166 __func__); 2167 return -EINVAL; 2168 } 2169 2170 if 
((ug_info->numStationAddresses != 2171 UCC_GETH_NUM_OF_STATION_ADDRESSES_1) && 2172 ug_info->rxExtendedFiltering) { 2173 if (netif_msg_probe(ugeth)) 2174 ugeth_err("%s: Number of station addresses greater than 1 " 2175 "not allowed in extended parsing mode.", 2176 __func__); 2177 return -EINVAL; 2178 } 2179 2180 /* Generate uccm_mask for receive */ 2181 uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */ 2182 for (i = 0; i < ug_info->numQueuesRx; i++) 2183 uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i); 2184 2185 for (i = 0; i < ug_info->numQueuesTx; i++) 2186 uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i); 2187 /* Initialize the general fast UCC block. */ 2188 if (ucc_fast_init(uf_info, &ugeth->uccf)) { 2189 if (netif_msg_probe(ugeth)) 2190 ugeth_err("%s: Failed to init uccf.", __func__); 2191 return -ENOMEM; 2192 } 2193 2194 /* read the number of risc engines, update the riscTx and riscRx 2195 * if there are 4 riscs in QE 2196 */ 2197 if (qe_get_num_of_risc() == 4) { 2198 ug_info->riscTx = QE_RISC_ALLOCATION_FOUR_RISCS; 2199 ug_info->riscRx = QE_RISC_ALLOCATION_FOUR_RISCS; 2200 } 2201 2202 ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs)); 2203 if (!ugeth->ug_regs) { 2204 if (netif_msg_probe(ugeth)) 2205 ugeth_err("%s: Failed to ioremap regs.", __func__); 2206 return -ENOMEM; 2207 } 2208 2209 skb_queue_head_init(&ugeth->rx_recycle); 2210 2211 return 0; 2212} 2213 2214static int ucc_geth_startup(struct ucc_geth_private *ugeth) 2215{ 2216 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 2217 struct ucc_geth_init_pram __iomem *p_init_enet_pram; 2218 struct ucc_fast_private *uccf; 2219 struct ucc_geth_info *ug_info; 2220 struct ucc_fast_info *uf_info; 2221 struct ucc_fast __iomem *uf_regs; 2222 struct ucc_geth __iomem *ug_regs; 2223 int ret_val = -EINVAL; 2224 u32 remoder = UCC_GETH_REMODER_INIT; 2225 u32 init_enet_pram_offset, cecr_subblock, command; 2226 u32 ifstat, i, j, size, l2qt, l3qt, length; 2227 u16 temoder = UCC_GETH_TEMODER_INIT; 2228 u16 test; 2229 u8 function_code = 0; 2230 u8 __iomem *bd; 2231 u8 __iomem *endOfRing; 2232 u8 numThreadsRxNumerical, numThreadsTxNumerical; 2233 2234 ugeth_vdbg("%s: IN", __func__); 2235 uccf = ugeth->uccf; 2236 ug_info = ugeth->ug_info; 2237 uf_info = &ug_info->uf_info; 2238 uf_regs = uccf->uf_regs; 2239 ug_regs = ugeth->ug_regs; 2240 2241 switch (ug_info->numThreadsRx) { 2242 case UCC_GETH_NUM_OF_THREADS_1: 2243 numThreadsRxNumerical = 1; 2244 break; 2245 case UCC_GETH_NUM_OF_THREADS_2: 2246 numThreadsRxNumerical = 2; 2247 break; 2248 case UCC_GETH_NUM_OF_THREADS_4: 2249 numThreadsRxNumerical = 4; 2250 break; 2251 case UCC_GETH_NUM_OF_THREADS_6: 2252 numThreadsRxNumerical = 6; 2253 break; 2254 case UCC_GETH_NUM_OF_THREADS_8: 2255 numThreadsRxNumerical = 8; 2256 break; 2257 default: 2258 if (netif_msg_ifup(ugeth)) 2259 ugeth_err("%s: Bad number of Rx threads value.", 2260 __func__); 2261 return -EINVAL; 2262 break; 2263 } 2264 2265 switch (ug_info->numThreadsTx) { 2266 case UCC_GETH_NUM_OF_THREADS_1: 2267 numThreadsTxNumerical = 1; 2268 break; 2269 case UCC_GETH_NUM_OF_THREADS_2: 2270 numThreadsTxNumerical = 2; 2271 break; 2272 case UCC_GETH_NUM_OF_THREADS_4: 2273 numThreadsTxNumerical = 4; 2274 break; 2275 case UCC_GETH_NUM_OF_THREADS_6: 2276 numThreadsTxNumerical = 6; 2277 break; 2278 case UCC_GETH_NUM_OF_THREADS_8: 2279 numThreadsTxNumerical = 8; 2280 break; 2281 default: 2282 if (netif_msg_ifup(ugeth)) 2283 ugeth_err("%s: Bad number of Tx threads value.", 2284 __func__); 2285 return 
-EINVAL; 2286 break; 2287 } 2288 2289 /* Calculate rx_extended_features */ 2290 ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck || 2291 ug_info->ipAddressAlignment || 2292 (ug_info->numStationAddresses != 2293 UCC_GETH_NUM_OF_STATION_ADDRESSES_1); 2294 2295 ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features || 2296 (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) || 2297 (ug_info->vlanOperationNonTagged != 2298 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP); 2299 2300 init_default_reg_vals(&uf_regs->upsmr, 2301 &ug_regs->maccfg1, &ug_regs->maccfg2); 2302 2303 /* Set UPSMR */ 2304 /* For more details see the hardware spec. */ 2305 init_rx_parameters(ug_info->bro, 2306 ug_info->rsh, ug_info->pro, &uf_regs->upsmr); 2307 2308 /* We're going to ignore other registers for now, */ 2309 /* except as needed to get up and running */ 2310 2311 /* Set MACCFG1 */ 2312 /* For more details see the hardware spec. */ 2313 init_flow_control_params(ug_info->aufc, 2314 ug_info->receiveFlowControl, 2315 ug_info->transmitFlowControl, 2316 ug_info->pausePeriod, 2317 ug_info->extensionField, 2318 &uf_regs->upsmr, 2319 &ug_regs->uempr, &ug_regs->maccfg1); 2320 2321 setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); 2322 2323 /* Set IPGIFG */ 2324 /* For more details see the hardware spec. */ 2325 ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1, 2326 ug_info->nonBackToBackIfgPart2, 2327 ug_info-> 2328 miminumInterFrameGapEnforcement, 2329 ug_info->backToBackInterFrameGap, 2330 &ug_regs->ipgifg); 2331 if (ret_val != 0) { 2332 if (netif_msg_ifup(ugeth)) 2333 ugeth_err("%s: IPGIFG initialization parameter too large.", 2334 __func__); 2335 return ret_val; 2336 } 2337 2338 /* Set HAFDUP */ 2339 /* For more details see the hardware spec. */ 2340 ret_val = init_half_duplex_params(ug_info->altBeb, 2341 ug_info->backPressureNoBackoff, 2342 ug_info->noBackoff, 2343 ug_info->excessDefer, 2344 ug_info->altBebTruncation, 2345 ug_info->maxRetransmission, 2346 ug_info->collisionWindow, 2347 &ug_regs->hafdup); 2348 if (ret_val != 0) { 2349 if (netif_msg_ifup(ugeth)) 2350 ugeth_err("%s: Half Duplex initialization parameter too large.", 2351 __func__); 2352 return ret_val; 2353 } 2354 2355 /* Set IFSTAT */ 2356 /* For more details see the hardware spec. */ 2357 /* Read only - resets upon read */ 2358 ifstat = in_be32(&ug_regs->ifstat); 2359 2360 /* Clear UEMPR */ 2361 /* For more details see the hardware spec. */ 2362 out_be32(&ug_regs->uempr, 0); 2363 2364 /* Set UESCR */ 2365 /* For more details see the hardware spec. 
*/ 2366 init_hw_statistics_gathering_mode((ug_info->statisticsMode & 2367 UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE), 2368 0, &uf_regs->upsmr, &ug_regs->uescr); 2369 2370 /* Allocate Tx bds */ 2371 for (j = 0; j < ug_info->numQueuesTx; j++) { 2372 /* Allocate in multiple of 2373 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT, 2374 according to spec */ 2375 length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) 2376 / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) 2377 * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; 2378 if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) % 2379 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) 2380 length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; 2381 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { 2382 u32 align = 4; 2383 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) 2384 align = UCC_GETH_TX_BD_RING_ALIGNMENT; 2385 ugeth->tx_bd_ring_offset[j] = 2386 (u32) kmalloc((u32) (length + align), GFP_KERNEL); 2387 2388 if (ugeth->tx_bd_ring_offset[j] != 0) 2389 ugeth->p_tx_bd_ring[j] = 2390 (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] + 2391 align) & ~(align - 1)); 2392 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { 2393 ugeth->tx_bd_ring_offset[j] = 2394 qe_muram_alloc(length, 2395 UCC_GETH_TX_BD_RING_ALIGNMENT); 2396 if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j])) 2397 ugeth->p_tx_bd_ring[j] = 2398 (u8 __iomem *) qe_muram_addr(ugeth-> 2399 tx_bd_ring_offset[j]); 2400 } 2401 if (!ugeth->p_tx_bd_ring[j]) { 2402 if (netif_msg_ifup(ugeth)) 2403 ugeth_err 2404 ("%s: Can not allocate memory for Tx bd rings.", 2405 __func__); 2406 return -ENOMEM; 2407 } 2408 /* Zero unused end of bd ring, according to spec */ 2409 memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] + 2410 ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0, 2411 length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)); 2412 } 2413 2414 /* Allocate Rx bds */ 2415 for (j = 0; j < ug_info->numQueuesRx; j++) { 2416 length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd); 2417 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { 2418 u32 align = 4; 2419 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) 2420 align = UCC_GETH_RX_BD_RING_ALIGNMENT; 2421 ugeth->rx_bd_ring_offset[j] = 2422 (u32) kmalloc((u32) (length + align), GFP_KERNEL); 2423 if (ugeth->rx_bd_ring_offset[j] != 0) 2424 ugeth->p_rx_bd_ring[j] = 2425 (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] + 2426 align) & ~(align - 1)); 2427 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { 2428 ugeth->rx_bd_ring_offset[j] = 2429 qe_muram_alloc(length, 2430 UCC_GETH_RX_BD_RING_ALIGNMENT); 2431 if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j])) 2432 ugeth->p_rx_bd_ring[j] = 2433 (u8 __iomem *) qe_muram_addr(ugeth-> 2434 rx_bd_ring_offset[j]); 2435 } 2436 if (!ugeth->p_rx_bd_ring[j]) { 2437 if (netif_msg_ifup(ugeth)) 2438 ugeth_err 2439 ("%s: Can not allocate memory for Rx bd rings.", 2440 __func__); 2441 return -ENOMEM; 2442 } 2443 } 2444 2445 /* Init Tx bds */ 2446 for (j = 0; j < ug_info->numQueuesTx; j++) { 2447 /* Setup the skbuff rings */ 2448 ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * 2449 ugeth->ug_info->bdRingLenTx[j], 2450 GFP_KERNEL); 2451 2452 if (ugeth->tx_skbuff[j] == NULL) { 2453 if (netif_msg_ifup(ugeth)) 2454 ugeth_err("%s: Could not allocate tx_skbuff", 2455 __func__); 2456 return -ENOMEM; 2457 } 2458 2459 for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++) 2460 ugeth->tx_skbuff[j][i] = NULL; 2461 2462 ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0; 2463 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; 2464 for (i = 0; i < 
ug_info->bdRingLenTx[j]; i++) { 2465 /* clear bd buffer */ 2466 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); 2467 /* set bd status and length */ 2468 out_be32((u32 __iomem *)bd, 0); 2469 bd += sizeof(struct qe_bd); 2470 } 2471 bd -= sizeof(struct qe_bd); 2472 /* set bd status and length */ 2473 out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */ 2474 } 2475 2476 /* Init Rx bds */ 2477 for (j = 0; j < ug_info->numQueuesRx; j++) { 2478 /* Setup the skbuff rings */ 2479 ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * 2480 ugeth->ug_info->bdRingLenRx[j], 2481 GFP_KERNEL); 2482 2483 if (ugeth->rx_skbuff[j] == NULL) { 2484 if (netif_msg_ifup(ugeth)) 2485 ugeth_err("%s: Could not allocate rx_skbuff", 2486 __func__); 2487 return -ENOMEM; 2488 } 2489 2490 for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++) 2491 ugeth->rx_skbuff[j][i] = NULL; 2492 2493 ugeth->skb_currx[j] = 0; 2494 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; 2495 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { 2496 /* set bd status and length */ 2497 out_be32((u32 __iomem *)bd, R_I); 2498 /* clear bd buffer */ 2499 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); 2500 bd += sizeof(struct qe_bd); 2501 } 2502 bd -= sizeof(struct qe_bd); 2503 /* set bd status and length */ 2504 out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */ 2505 } 2506 2507 /* 2508 * Global PRAM 2509 */ 2510 /* Tx global PRAM */ 2511 /* Allocate global tx parameter RAM page */ 2512 ugeth->tx_glbl_pram_offset = 2513 qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram), 2514 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); 2515 if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) { 2516 if (netif_msg_ifup(ugeth)) 2517 ugeth_err 2518 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", 2519 __func__); 2520 return -ENOMEM; 2521 } 2522 ugeth->p_tx_glbl_pram = 2523 (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth-> 2524 tx_glbl_pram_offset); 2525 /* Zero out p_tx_glbl_pram */ 2526 memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); 2527 2528 /* Fill global PRAM */ 2529 2530 /* TQPTR */ 2531 /* Size varies with number of Tx threads */ 2532 ugeth->thread_dat_tx_offset = 2533 qe_muram_alloc(numThreadsTxNumerical * 2534 sizeof(struct ucc_geth_thread_data_tx) + 2535 32 * (numThreadsTxNumerical == 1), 2536 UCC_GETH_THREAD_DATA_ALIGNMENT); 2537 if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) { 2538 if (netif_msg_ifup(ugeth)) 2539 ugeth_err 2540 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", 2541 __func__); 2542 return -ENOMEM; 2543 } 2544 2545 ugeth->p_thread_data_tx = 2546 (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth-> 2547 thread_dat_tx_offset); 2548 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); 2549 2550 /* vtagtable */ 2551 for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++) 2552 out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i], 2553 ug_info->vtagtable[i]); 2554 2555 /* iphoffset */ 2556 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) 2557 out_8(&ugeth->p_tx_glbl_pram->iphoffset[i], 2558 ug_info->iphoffset[i]); 2559 2560 /* SQPTR */ 2561 /* Size varies with number of Tx queues */ 2562 ugeth->send_q_mem_reg_offset = 2563 qe_muram_alloc(ug_info->numQueuesTx * 2564 sizeof(struct ucc_geth_send_queue_qd), 2565 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); 2566 if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) { 2567 if (netif_msg_ifup(ugeth)) 2568 ugeth_err 2569 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", 2570 __func__); 2571 
return -ENOMEM; 2572 } 2573 2574 ugeth->p_send_q_mem_reg = 2575 (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth-> 2576 send_q_mem_reg_offset); 2577 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); 2578 2579 /* Setup the table */ 2580 /* Assume BD rings are already established */ 2581 for (i = 0; i < ug_info->numQueuesTx; i++) { 2582 endOfRing = 2583 ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] - 2584 1) * sizeof(struct qe_bd); 2585 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { 2586 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, 2587 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i])); 2588 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. 2589 last_bd_completed_address, 2590 (u32) virt_to_phys(endOfRing)); 2591 } else if (ugeth->ug_info->uf_info.bd_mem_part == 2592 MEM_PART_MURAM) { 2593 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, 2594 (u32) immrbar_virt_to_phys(ugeth-> 2595 p_tx_bd_ring[i])); 2596 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. 2597 last_bd_completed_address, 2598 (u32) immrbar_virt_to_phys(endOfRing)); 2599 } 2600 } 2601 2602 /* schedulerbasepointer */ 2603 2604 if (ug_info->numQueuesTx > 1) { 2605 /* scheduler exists only if more than 1 tx queue */ 2606 ugeth->scheduler_offset = 2607 qe_muram_alloc(sizeof(struct ucc_geth_scheduler), 2608 UCC_GETH_SCHEDULER_ALIGNMENT); 2609 if (IS_ERR_VALUE(ugeth->scheduler_offset)) { 2610 if (netif_msg_ifup(ugeth)) 2611 ugeth_err 2612 ("%s: Can not allocate DPRAM memory for p_scheduler.", 2613 __func__); 2614 return -ENOMEM; 2615 } 2616 2617 ugeth->p_scheduler = 2618 (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth-> 2619 scheduler_offset); 2620 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, 2621 ugeth->scheduler_offset); 2622 /* Zero out p_scheduler */ 2623 memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); 2624 2625 /* Set values in scheduler */ 2626 out_be32(&ugeth->p_scheduler->mblinterval, 2627 ug_info->mblinterval); 2628 out_be16(&ugeth->p_scheduler->nortsrbytetime, 2629 ug_info->nortsrbytetime); 2630 out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz); 2631 out_8(&ugeth->p_scheduler->strictpriorityq, 2632 ug_info->strictpriorityq); 2633 out_8(&ugeth->p_scheduler->txasap, ug_info->txasap); 2634 out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw); 2635 for (i = 0; i < NUM_TX_QUEUES; i++) 2636 out_8(&ugeth->p_scheduler->weightfactor[i], 2637 ug_info->weightfactor[i]); 2638 2639 /* Set pointers to cpucount registers in scheduler */ 2640 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); 2641 ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1); 2642 ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2); 2643 ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3); 2644 ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4); 2645 ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5); 2646 ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6); 2647 ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7); 2648 } 2649 2650 /* schedulerbasepointer */ 2651 /* TxRMON_PTR (statistics) */ 2652 if (ug_info-> 2653 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { 2654 ugeth->tx_fw_statistics_pram_offset = 2655 qe_muram_alloc(sizeof 2656 (struct ucc_geth_tx_firmware_statistics_pram), 2657 UCC_GETH_TX_STATISTICS_ALIGNMENT); 2658 if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) { 2659 if (netif_msg_ifup(ugeth)) 2660 ugeth_err 2661 ("%s: Can not allocate DPRAM memory for" 2662 " 
p_tx_fw_statistics_pram.", 2663 __func__); 2664 return -ENOMEM; 2665 } 2666 ugeth->p_tx_fw_statistics_pram = 2667 (struct ucc_geth_tx_firmware_statistics_pram __iomem *) 2668 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); 2669 /* Zero out p_tx_fw_statistics_pram */ 2670 memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram, 2671 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram)); 2672 } 2673 2674 /* temoder */ 2675 /* Already has speed set */ 2676 2677 if (ug_info->numQueuesTx > 1) 2678 temoder |= TEMODER_SCHEDULER_ENABLE; 2679 if (ug_info->ipCheckSumGenerate) 2680 temoder |= TEMODER_IP_CHECKSUM_GENERATE; 2681 temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT); 2682 out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder); 2683 2684 test = in_be16(&ugeth->p_tx_glbl_pram->temoder); 2685 2686 /* Function code register value to be used later */ 2687 function_code = UCC_BMR_BO_BE | UCC_BMR_GBL; 2688 /* Required for QE */ 2689 2690 /* function code register */ 2691 out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24); 2692 2693 /* Rx global PRAM */ 2694 /* Allocate global rx parameter RAM page */ 2695 ugeth->rx_glbl_pram_offset = 2696 qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram), 2697 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); 2698 if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) { 2699 if (netif_msg_ifup(ugeth)) 2700 ugeth_err 2701 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", 2702 __func__); 2703 return -ENOMEM; 2704 } 2705 ugeth->p_rx_glbl_pram = 2706 (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth-> 2707 rx_glbl_pram_offset); 2708 /* Zero out p_rx_glbl_pram */ 2709 memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); 2710 2711 /* Fill global PRAM */ 2712 2713 /* RQPTR */ 2714 /* Size varies with number of Rx threads */ 2715 ugeth->thread_dat_rx_offset = 2716 qe_muram_alloc(numThreadsRxNumerical * 2717 sizeof(struct ucc_geth_thread_data_rx), 2718 UCC_GETH_THREAD_DATA_ALIGNMENT); 2719 if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) { 2720 if (netif_msg_ifup(ugeth)) 2721 ugeth_err 2722 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", 2723 __func__); 2724 return -ENOMEM; 2725 } 2726 2727 ugeth->p_thread_data_rx = 2728 (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth-> 2729 thread_dat_rx_offset); 2730 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); 2731 2732 /* typeorlen */ 2733 out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen); 2734 2735 /* rxrmonbaseptr (statistics) */ 2736 if (ug_info-> 2737 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { 2738 ugeth->rx_fw_statistics_pram_offset = 2739 qe_muram_alloc(sizeof 2740 (struct ucc_geth_rx_firmware_statistics_pram), 2741 UCC_GETH_RX_STATISTICS_ALIGNMENT); 2742 if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) { 2743 if (netif_msg_ifup(ugeth)) 2744 ugeth_err 2745 ("%s: Can not allocate DPRAM memory for" 2746 " p_rx_fw_statistics_pram.", __func__); 2747 return -ENOMEM; 2748 } 2749 ugeth->p_rx_fw_statistics_pram = 2750 (struct ucc_geth_rx_firmware_statistics_pram __iomem *) 2751 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); 2752 /* Zero out p_rx_fw_statistics_pram */ 2753 memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0, 2754 sizeof(struct ucc_geth_rx_firmware_statistics_pram)); 2755 } 2756 2757 /* intCoalescingPtr */ 2758 2759 /* Size varies with number of Rx queues */ 2760 ugeth->rx_irq_coalescing_tbl_offset = 2761 
qe_muram_alloc(ug_info->numQueuesRx * 2762 sizeof(struct ucc_geth_rx_interrupt_coalescing_entry) 2763 + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); 2764 if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) { 2765 if (netif_msg_ifup(ugeth)) 2766 ugeth_err 2767 ("%s: Can not allocate DPRAM memory for" 2768 " p_rx_irq_coalescing_tbl.", __func__); 2769 return -ENOMEM; 2770 } 2771 2772 ugeth->p_rx_irq_coalescing_tbl = 2773 (struct ucc_geth_rx_interrupt_coalescing_table __iomem *) 2774 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); 2775 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, 2776 ugeth->rx_irq_coalescing_tbl_offset); 2777 2778 /* Fill interrupt coalescing table */ 2779 for (i = 0; i < ug_info->numQueuesRx; i++) { 2780 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. 2781 interruptcoalescingmaxvalue, 2782 ug_info->interruptcoalescingmaxvalue[i]); 2783 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. 2784 interruptcoalescingcounter, 2785 ug_info->interruptcoalescingmaxvalue[i]); 2786 } 2787 2788 /* MRBLR */ 2789 init_max_rx_buff_len(uf_info->max_rx_buf_length, 2790 &ugeth->p_rx_glbl_pram->mrblr); 2791 /* MFLR */ 2792 out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength); 2793 /* MINFLR */ 2794 init_min_frame_len(ug_info->minFrameLength, 2795 &ugeth->p_rx_glbl_pram->minflr, 2796 &ugeth->p_rx_glbl_pram->mrblr); 2797 /* MAXD1 */ 2798 out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length); 2799 /* MAXD2 */ 2800 out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length); 2801 2802 /* l2qt */ 2803 l2qt = 0; 2804 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) 2805 l2qt |= (ug_info->l2qt[i] << (28 - 4 * i)); 2806 out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt); 2807 2808 /* l3qt */ 2809 for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) { 2810 l3qt = 0; 2811 for (i = 0; i < 8; i++) 2812 l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i)); 2813 out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt); 2814 } 2815 2816 /* vlantype */ 2817 out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype); 2818 2819 /* vlantci */ 2820 out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci); 2821 2822 /* ecamptr */ 2823 out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr); 2824 2825 /* RBDQPTR */ 2826 /* Size varies with number of Rx queues */ 2827 ugeth->rx_bd_qs_tbl_offset = 2828 qe_muram_alloc(ug_info->numQueuesRx * 2829 (sizeof(struct ucc_geth_rx_bd_queues_entry) + 2830 sizeof(struct ucc_geth_rx_prefetched_bds)), 2831 UCC_GETH_RX_BD_QUEUES_ALIGNMENT); 2832 if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) { 2833 if (netif_msg_ifup(ugeth)) 2834 ugeth_err 2835 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", 2836 __func__); 2837 return -ENOMEM; 2838 } 2839 2840 ugeth->p_rx_bd_qs_tbl = 2841 (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth-> 2842 rx_bd_qs_tbl_offset); 2843 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); 2844 /* Zero out p_rx_bd_qs_tbl */ 2845 memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl, 2846 0, 2847 ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) + 2848 sizeof(struct ucc_geth_rx_prefetched_bds))); 2849 2850 /* Setup the table */ 2851 /* Assume BD rings are already established */ 2852 for (i = 0; i < ug_info->numQueuesRx; i++) { 2853 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { 2854 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, 2855 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i])); 2856 } else if (ugeth->ug_info->uf_info.bd_mem_part == 2857 
MEM_PART_MURAM) { 2858 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, 2859 (u32) immrbar_virt_to_phys(ugeth-> 2860 p_rx_bd_ring[i])); 2861 } 2862 /* rest of fields handled by QE */ 2863 } 2864 2865 /* remoder */ 2866 /* Already has speed set */ 2867 2868 if (ugeth->rx_extended_features) 2869 remoder |= REMODER_RX_EXTENDED_FEATURES; 2870 if (ug_info->rxExtendedFiltering) 2871 remoder |= REMODER_RX_EXTENDED_FILTERING; 2872 if (ug_info->dynamicMaxFrameLength) 2873 remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH; 2874 if (ug_info->dynamicMinFrameLength) 2875 remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH; 2876 remoder |= 2877 ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT; 2878 remoder |= 2879 ug_info-> 2880 vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT; 2881 remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT; 2882 remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT); 2883 if (ug_info->ipCheckSumCheck) 2884 remoder |= REMODER_IP_CHECKSUM_CHECK; 2885 if (ug_info->ipAddressAlignment) 2886 remoder |= REMODER_IP_ADDRESS_ALIGNMENT; 2887 out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder); 2888 2889 /* Note that this function must be called */ 2890 /* ONLY AFTER p_tx_fw_statistics_pram */ 2891 /* andp_UccGethRxFirmwareStatisticsPram are allocated ! */ 2892 init_firmware_statistics_gathering_mode((ug_info-> 2893 statisticsMode & 2894 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX), 2895 (ug_info->statisticsMode & 2896 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX), 2897 &ugeth->p_tx_glbl_pram->txrmonbaseptr, 2898 ugeth->tx_fw_statistics_pram_offset, 2899 &ugeth->p_rx_glbl_pram->rxrmonbaseptr, 2900 ugeth->rx_fw_statistics_pram_offset, 2901 &ugeth->p_tx_glbl_pram->temoder, 2902 &ugeth->p_rx_glbl_pram->remoder); 2903 2904 /* function code register */ 2905 out_8(&ugeth->p_rx_glbl_pram->rstate, function_code); 2906 2907 /* initialize extended filtering */ 2908 if (ug_info->rxExtendedFiltering) { 2909 if (!ug_info->extendedFilteringChainPointer) { 2910 if (netif_msg_ifup(ugeth)) 2911 ugeth_err("%s: Null Extended Filtering Chain Pointer.", 2912 __func__); 2913 return -EINVAL; 2914 } 2915 2916 /* Allocate memory for extended filtering Mode Global 2917 Parameters */ 2918 ugeth->exf_glbl_param_offset = 2919 qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram), 2920 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); 2921 if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) { 2922 if (netif_msg_ifup(ugeth)) 2923 ugeth_err 2924 ("%s: Can not allocate DPRAM memory for" 2925 " p_exf_glbl_param.", __func__); 2926 return -ENOMEM; 2927 } 2928 2929 ugeth->p_exf_glbl_param = 2930 (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth-> 2931 exf_glbl_param_offset); 2932 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, 2933 ugeth->exf_glbl_param_offset); 2934 out_be32(&ugeth->p_exf_glbl_param->l2pcdptr, 2935 (u32) ug_info->extendedFilteringChainPointer); 2936 2937 } else { /* initialize 82xx style address filtering */ 2938 2939 /* Init individual address recognition registers to disabled */ 2940 2941 for (j = 0; j < NUM_OF_PADDRS; j++) 2942 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); 2943 2944 p_82xx_addr_filt = 2945 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> 2946 p_rx_glbl_pram->addressfiltering; 2947 2948 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, 2949 ENET_ADDR_TYPE_GROUP); 2950 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, 2951 ENET_ADDR_TYPE_INDIVIDUAL); 2952 } 2953 2954 /* 2955 * 
Initialize UCC at QE level 2956 */ 2957 2958 command = QE_INIT_TX_RX; 2959 2960 /* Allocate shadow InitEnet command parameter structure. 2961 * This is needed because after the InitEnet command is executed, 2962 * the structure in DPRAM is released, because DPRAM is a premium 2963 * resource. 2964 * This shadow structure keeps a copy of what was done so that the 2965 * allocated resources can be released when the channel is freed. 2966 */ 2967 if (!(ugeth->p_init_enet_param_shadow = 2968 kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) { 2969 if (netif_msg_ifup(ugeth)) 2970 ugeth_err 2971 ("%s: Can not allocate memory for" 2972 " p_UccInitEnetParamShadows.", __func__); 2973 return -ENOMEM; 2974 } 2975 /* Zero out *p_init_enet_param_shadow */ 2976 memset((char *)ugeth->p_init_enet_param_shadow, 2977 0, sizeof(struct ucc_geth_init_pram)); 2978 2979 /* Fill shadow InitEnet command parameter structure */ 2980 2981 ugeth->p_init_enet_param_shadow->resinit1 = 2982 ENET_INIT_PARAM_MAGIC_RES_INIT1; 2983 ugeth->p_init_enet_param_shadow->resinit2 = 2984 ENET_INIT_PARAM_MAGIC_RES_INIT2; 2985 ugeth->p_init_enet_param_shadow->resinit3 = 2986 ENET_INIT_PARAM_MAGIC_RES_INIT3; 2987 ugeth->p_init_enet_param_shadow->resinit4 = 2988 ENET_INIT_PARAM_MAGIC_RES_INIT4; 2989 ugeth->p_init_enet_param_shadow->resinit5 = 2990 ENET_INIT_PARAM_MAGIC_RES_INIT5; 2991 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= 2992 ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT; 2993 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= 2994 ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT; 2995 2996 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= 2997 ugeth->rx_glbl_pram_offset | ug_info->riscRx; 2998 if ((ug_info->largestexternallookupkeysize != 2999 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) && 3000 (ug_info->largestexternallookupkeysize != 3001 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) && 3002 (ug_info->largestexternallookupkeysize != 3003 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { 3004 if (netif_msg_ifup(ugeth)) 3005 ugeth_err("%s: Invalid largest External Lookup Key Size.", 3006 __func__); 3007 return -EINVAL; 3008 } 3009 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = 3010 ug_info->largestexternallookupkeysize; 3011 size = sizeof(struct ucc_geth_thread_rx_pram); 3012 if (ug_info->rxExtendedFiltering) { 3013 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; 3014 if (ug_info->largestexternallookupkeysize == 3015 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) 3016 size += 3017 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; 3018 if (ug_info->largestexternallookupkeysize == 3019 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) 3020 size += 3021 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; 3022 } 3023 3024 if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth-> 3025 p_init_enet_param_shadow->rxthread[0]), 3026 (u8) (numThreadsRxNumerical + 1) 3027 /* Rx needs one extra for terminator */ 3028 , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT, 3029 ug_info->riscRx, 1)) != 0) { 3030 if (netif_msg_ifup(ugeth)) 3031 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3032 __func__); 3033 return ret_val; 3034 } 3035 3036 ugeth->p_init_enet_param_shadow->txglobal = 3037 ugeth->tx_glbl_pram_offset | ug_info->riscTx; 3038 if ((ret_val = 3039 fill_init_enet_entries(ugeth, 3040 &(ugeth->p_init_enet_param_shadow-> 3041 txthread[0]), numThreadsTxNumerical, 3042 sizeof(struct ucc_geth_thread_tx_pram), 3043 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, 3044 
ug_info->riscTx, 0)) != 0) { 3045 if (netif_msg_ifup(ugeth)) 3046 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3047 __func__); 3048 return ret_val; 3049 } 3050 3051 /* Load Rx bds with buffers */ 3052 for (i = 0; i < ug_info->numQueuesRx; i++) { 3053 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { 3054 if (netif_msg_ifup(ugeth)) 3055 ugeth_err("%s: Can not fill Rx bds with buffers.", 3056 __func__); 3057 return ret_val; 3058 } 3059 } 3060 3061 /* Allocate InitEnet command parameter structure */ 3062 init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4); 3063 if (IS_ERR_VALUE(init_enet_pram_offset)) { 3064 if (netif_msg_ifup(ugeth)) 3065 ugeth_err 3066 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", 3067 __func__); 3068 return -ENOMEM; 3069 } 3070 p_init_enet_pram = 3071 (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset); 3072 3073 /* Copy shadow InitEnet command parameter structure into PRAM */ 3074 out_8(&p_init_enet_pram->resinit1, 3075 ugeth->p_init_enet_param_shadow->resinit1); 3076 out_8(&p_init_enet_pram->resinit2, 3077 ugeth->p_init_enet_param_shadow->resinit2); 3078 out_8(&p_init_enet_pram->resinit3, 3079 ugeth->p_init_enet_param_shadow->resinit3); 3080 out_8(&p_init_enet_pram->resinit4, 3081 ugeth->p_init_enet_param_shadow->resinit4); 3082 out_be16(&p_init_enet_pram->resinit5, 3083 ugeth->p_init_enet_param_shadow->resinit5); 3084 out_8(&p_init_enet_pram->largestexternallookupkeysize, 3085 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize); 3086 out_be32(&p_init_enet_pram->rgftgfrxglobal, 3087 ugeth->p_init_enet_param_shadow->rgftgfrxglobal); 3088 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) 3089 out_be32(&p_init_enet_pram->rxthread[i], 3090 ugeth->p_init_enet_param_shadow->rxthread[i]); 3091 out_be32(&p_init_enet_pram->txglobal, 3092 ugeth->p_init_enet_param_shadow->txglobal); 3093 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++) 3094 out_be32(&p_init_enet_pram->txthread[i], 3095 ugeth->p_init_enet_param_shadow->txthread[i]); 3096 3097 /* Issue QE command */ 3098 cecr_subblock = 3099 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 3100 qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 3101 init_enet_pram_offset); 3102 3103 /* Free InitEnet command parameter */ 3104 qe_muram_free(init_enet_pram_offset); 3105 3106 return 0; 3107} 3108 3109/* This is called by the kernel when a frame is ready for transmission. 
*/ 3110/* It is pointed to by the dev->hard_start_xmit function pointer */ 3111static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) 3112{ 3113 struct ucc_geth_private *ugeth = netdev_priv(dev); 3114#ifdef CONFIG_UGETH_TX_ON_DEMAND 3115 struct ucc_fast_private *uccf; 3116#endif 3117 u8 __iomem *bd; /* BD pointer */ 3118 u32 bd_status; 3119 u8 txQ = 0; 3120 unsigned long flags; 3121 3122 ugeth_vdbg("%s: IN", __func__); 3123 3124 spin_lock_irqsave(&ugeth->lock, flags); 3125 3126 dev->stats.tx_bytes += skb->len; 3127 3128 /* Start from the next BD that should be filled */ 3129 bd = ugeth->txBd[txQ]; 3130 bd_status = in_be32((u32 __iomem *)bd); 3131 /* Save the skb pointer so we can free it later */ 3132 ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; 3133 3134 /* Update the current skb pointer (wrapping if this was the last) */ 3135 ugeth->skb_curtx[txQ] = 3136 (ugeth->skb_curtx[txQ] + 3137 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); 3138 3139 /* set up the buffer descriptor */ 3140 out_be32(&((struct qe_bd __iomem *)bd)->buf, 3141 dma_map_single(ugeth->dev, skb->data, 3142 skb->len, DMA_TO_DEVICE)); 3143 3144 /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ 3145 3146 bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; 3147 3148 /* set bd status and length */ 3149 out_be32((u32 __iomem *)bd, bd_status); 3150 3151 /* Move to next BD in the ring */ 3152 if (!(bd_status & T_W)) 3153 bd += sizeof(struct qe_bd); 3154 else 3155 bd = ugeth->p_tx_bd_ring[txQ]; 3156 3157 /* If the next BD still needs to be cleaned up, then the bds 3158 are full. We need to tell the kernel to stop sending us stuff. */ 3159 if (bd == ugeth->confBd[txQ]) { 3160 if (!netif_queue_stopped(dev)) 3161 netif_stop_queue(dev); 3162 } 3163 3164 ugeth->txBd[txQ] = bd; 3165 3166 if (ugeth->p_scheduler) { 3167 ugeth->cpucount[txQ]++; 3168 /* Indicate to QE that there are more Tx bds ready for 3169 transmission */ 3170 /* This is done by writing a running counter of the bd 3171 count to the scheduler PRAM. */ 3172 out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]); 3173 } 3174 3175#ifdef CONFIG_UGETH_TX_ON_DEMAND 3176 uccf = ugeth->uccf; 3177 out_be16(uccf->p_utodr, UCC_FAST_TOD); 3178#endif 3179 spin_unlock_irqrestore(&ugeth->lock, flags); 3180 3181 return NETDEV_TX_OK; 3182} 3183 3184static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) 3185{ 3186 struct sk_buff *skb; 3187 u8 __iomem *bd; 3188 u16 length, howmany = 0; 3189 u32 bd_status; 3190 u8 *bdBuffer; 3191 struct net_device *dev; 3192 3193 ugeth_vdbg("%s: IN", __func__); 3194 3195 dev = ugeth->ndev; 3196 3197 /* collect received buffers */ 3198 bd = ugeth->rxBd[rxQ]; 3199 3200 bd_status = in_be32((u32 __iomem *)bd); 3201 3202 /* while there are received buffers and BD is full (~R_E) */ 3203 while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { 3204 bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf); 3205 length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); 3206 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; 3207 3208 /* determine whether buffer is first, last, first and last 3209 (single buffer frame) or middle (not first and not last) */ 3210 if (!skb || 3211 (!(bd_status & (R_F | R_L))) || 3212 (bd_status & R_ERRORS_FATAL)) { 3213 if (netif_msg_rx_err(ugeth)) 3214 ugeth_err("%s, %d: ERROR!!! 
skb - 0x%08x", 3215 __func__, __LINE__, (u32) skb); 3216 if (skb) { 3217 skb->data = skb->head + NET_SKB_PAD; 3218 skb->len = 0; 3219 skb_reset_tail_pointer(skb); 3220 __skb_queue_head(&ugeth->rx_recycle, skb); 3221 } 3222 3223 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; 3224 dev->stats.rx_dropped++; 3225 } else { 3226 dev->stats.rx_packets++; 3227 howmany++; 3228 3229 /* Prep the skb for the packet */ 3230 skb_put(skb, length); 3231 3232 /* Tell the skb what kind of packet this is */ 3233 skb->protocol = eth_type_trans(skb, ugeth->ndev); 3234 3235 dev->stats.rx_bytes += length; 3236 /* Send the packet up the stack */ 3237 netif_receive_skb(skb); 3238 } 3239 3240 skb = get_new_skb(ugeth, bd); 3241 if (!skb) { 3242 if (netif_msg_rx_err(ugeth)) 3243 ugeth_warn("%s: No Rx Data Buffer", __func__); 3244 dev->stats.rx_dropped++; 3245 break; 3246 } 3247 3248 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb; 3249 3250 /* update to point at the next skb */ 3251 ugeth->skb_currx[rxQ] = 3252 (ugeth->skb_currx[rxQ] + 3253 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]); 3254 3255 if (bd_status & R_W) 3256 bd = ugeth->p_rx_bd_ring[rxQ]; 3257 else 3258 bd += sizeof(struct qe_bd); 3259 3260 bd_status = in_be32((u32 __iomem *)bd); 3261 } 3262 3263 ugeth->rxBd[rxQ] = bd; 3264 return howmany; 3265} 3266 3267static int ucc_geth_tx(struct net_device *dev, u8 txQ) 3268{ 3269 /* Start from the next BD that should be filled */ 3270 struct ucc_geth_private *ugeth = netdev_priv(dev); 3271 u8 __iomem *bd; /* BD pointer */ 3272 u32 bd_status; 3273 3274 bd = ugeth->confBd[txQ]; 3275 bd_status = in_be32((u32 __iomem *)bd); 3276 3277 /* Normal processing. */ 3278 while ((bd_status & T_R) == 0) { 3279 struct sk_buff *skb; 3280 3281 /* BD contains already transmitted buffer. 
*/ 3282 /* Handle the transmitted buffer and release */ 3283 /* the BD to be used with the current frame */ 3284 3285 skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]; 3286 if (!skb) 3287 break; 3288 3289 dev->stats.tx_packets++; 3290 3291 if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN && 3292 skb_recycle_check(skb, 3293 ugeth->ug_info->uf_info.max_rx_buf_length + 3294 UCC_GETH_RX_DATA_BUF_ALIGNMENT)) 3295 __skb_queue_head(&ugeth->rx_recycle, skb); 3296 else 3297 dev_kfree_skb(skb); 3298 3299 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; 3300 ugeth->skb_dirtytx[txQ] = 3301 (ugeth->skb_dirtytx[txQ] + 3302 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); 3303 3304 /* We freed a buffer, so now we can restart transmission */ 3305 if (netif_queue_stopped(dev)) 3306 netif_wake_queue(dev); 3307 3308 /* Advance the confirmation BD pointer */ 3309 if (!(bd_status & T_W)) 3310 bd += sizeof(struct qe_bd); 3311 else 3312 bd = ugeth->p_tx_bd_ring[txQ]; 3313 bd_status = in_be32((u32 __iomem *)bd); 3314 } 3315 ugeth->confBd[txQ] = bd; 3316 return 0; 3317} 3318 3319static int ucc_geth_poll(struct napi_struct *napi, int budget) 3320{ 3321 struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi); 3322 struct ucc_geth_info *ug_info; 3323 int howmany, i; 3324 3325 ug_info = ugeth->ug_info; 3326 3327 /* Tx event processing */ 3328 spin_lock(&ugeth->lock); 3329 for (i = 0; i < ug_info->numQueuesTx; i++) 3330 ucc_geth_tx(ugeth->ndev, i); 3331 spin_unlock(&ugeth->lock); 3332 3333 howmany = 0; 3334 for (i = 0; i < ug_info->numQueuesRx; i++) 3335 howmany += ucc_geth_rx(ugeth, i, budget - howmany); 3336 3337 if (howmany < budget) { 3338 napi_complete(napi); 3339 setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS); 3340 } 3341 3342 return howmany; 3343} 3344 3345static irqreturn_t ucc_geth_irq_handler(int irq, void *info) 3346{ 3347 struct net_device *dev = info; 3348 struct ucc_geth_private *ugeth = netdev_priv(dev); 3349 struct ucc_fast_private *uccf; 3350 struct ucc_geth_info *ug_info; 3351 register u32 ucce; 3352 register u32 uccm; 3353 3354 ugeth_vdbg("%s: IN", __func__); 3355 3356 uccf = ugeth->uccf; 3357 ug_info = ugeth->ug_info; 3358 3359 /* read and clear events */ 3360 ucce = (u32) in_be32(uccf->p_ucce); 3361 uccm = (u32) in_be32(uccf->p_uccm); 3362 ucce &= uccm; 3363 out_be32(uccf->p_ucce, ucce); 3364 3365 /* check for receive events that require processing */ 3366 if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) { 3367 if (napi_schedule_prep(&ugeth->napi)) { 3368 uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS); 3369 out_be32(uccf->p_uccm, uccm); 3370 __napi_schedule(&ugeth->napi); 3371 } 3372 } 3373 3374 /* Errors and other events */ 3375 if (ucce & UCCE_OTHER) { 3376 if (ucce & UCC_GETH_UCCE_BSY) 3377 dev->stats.rx_errors++; 3378 if (ucce & UCC_GETH_UCCE_TXE) 3379 dev->stats.tx_errors++; 3380 } 3381 3382 return IRQ_HANDLED; 3383} 3384 3385#ifdef CONFIG_NET_POLL_CONTROLLER 3386/* 3387 * Polling 'interrupt' - used by things like netconsole to send skbs 3388 * without having to re-enable interrupts. It's not called while 3389 * the interrupt routine is executing. 
3390 */ 3391static void ucc_netpoll(struct net_device *dev) 3392{ 3393 struct ucc_geth_private *ugeth = netdev_priv(dev); 3394 int irq = ugeth->ug_info->uf_info.irq; 3395 3396 disable_irq(irq); 3397 ucc_geth_irq_handler(irq, dev); 3398 enable_irq(irq); 3399} 3400#endif /* CONFIG_NET_POLL_CONTROLLER */ 3401 3402static int ucc_geth_set_mac_addr(struct net_device *dev, void *p) 3403{ 3404 struct ucc_geth_private *ugeth = netdev_priv(dev); 3405 struct sockaddr *addr = p; 3406 3407 if (!is_valid_ether_addr(addr->sa_data)) 3408 return -EADDRNOTAVAIL; 3409 3410 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 3411 3412 /* 3413 * If device is not running, we will set mac addr register 3414 * when opening the device. 3415 */ 3416 if (!netif_running(dev)) 3417 return 0; 3418 3419 spin_lock_irq(&ugeth->lock); 3420 init_mac_station_addr_regs(dev->dev_addr[0], 3421 dev->dev_addr[1], 3422 dev->dev_addr[2], 3423 dev->dev_addr[3], 3424 dev->dev_addr[4], 3425 dev->dev_addr[5], 3426 &ugeth->ug_regs->macstnaddr1, 3427 &ugeth->ug_regs->macstnaddr2); 3428 spin_unlock_irq(&ugeth->lock); 3429 3430 return 0; 3431} 3432 3433static int ucc_geth_init_mac(struct ucc_geth_private *ugeth) 3434{ 3435 struct net_device *dev = ugeth->ndev; 3436 int err; 3437 3438 err = ucc_struct_init(ugeth); 3439 if (err) { 3440 if (netif_msg_ifup(ugeth)) 3441 ugeth_err("%s: Cannot configure internal struct, " 3442 "aborting.", dev->name); 3443 goto err; 3444 } 3445 3446 err = ucc_geth_startup(ugeth); 3447 if (err) { 3448 if (netif_msg_ifup(ugeth)) 3449 ugeth_err("%s: Cannot configure net device, aborting.", 3450 dev->name); 3451 goto err; 3452 } 3453 3454 err = adjust_enet_interface(ugeth); 3455 if (err) { 3456 if (netif_msg_ifup(ugeth)) 3457 ugeth_err("%s: Cannot configure net device, aborting.", 3458 dev->name); 3459 goto err; 3460 } 3461 3462 /* Set MACSTNADDR1, MACSTNADDR2 */ 3463 /* For more details see the hardware spec. */ 3464 init_mac_station_addr_regs(dev->dev_addr[0], 3465 dev->dev_addr[1], 3466 dev->dev_addr[2], 3467 dev->dev_addr[3], 3468 dev->dev_addr[4], 3469 dev->dev_addr[5], 3470 &ugeth->ug_regs->macstnaddr1, 3471 &ugeth->ug_regs->macstnaddr2); 3472 3473 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); 3474 if (err) { 3475 if (netif_msg_ifup(ugeth)) 3476 ugeth_err("%s: Cannot enable net device, aborting.", dev->name); 3477 goto err; 3478 } 3479 3480 return 0; 3481err: 3482 ucc_geth_stop(ugeth); 3483 return err; 3484} 3485 3486/* Called when something needs to use the ethernet device */ 3487/* Returns 0 for success. 
*/ 3488static int ucc_geth_open(struct net_device *dev) 3489{ 3490 struct ucc_geth_private *ugeth = netdev_priv(dev); 3491 int err; 3492 3493 ugeth_vdbg("%s: IN", __func__); 3494 3495 /* Test station address */ 3496 if (dev->dev_addr[0] & ENET_GROUP_ADDR) { 3497 if (netif_msg_ifup(ugeth)) 3498 ugeth_err("%s: Multicast address used for station " 3499 "address - is this what you wanted?", 3500 __func__); 3501 return -EINVAL; 3502 } 3503 3504 err = init_phy(dev); 3505 if (err) { 3506 if (netif_msg_ifup(ugeth)) 3507 ugeth_err("%s: Cannot initialize PHY, aborting.", 3508 dev->name); 3509 return err; 3510 } 3511 3512 err = ucc_geth_init_mac(ugeth); 3513 if (err) { 3514 if (netif_msg_ifup(ugeth)) 3515 ugeth_err("%s: Cannot initialize MAC, aborting.", 3516 dev->name); 3517 goto err; 3518 } 3519 3520 err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 3521 0, "UCC Geth", dev); 3522 if (err) { 3523 if (netif_msg_ifup(ugeth)) 3524 ugeth_err("%s: Cannot get IRQ for net device, aborting.", 3525 dev->name); 3526 goto err; 3527 } 3528 3529 phy_start(ugeth->phydev); 3530 napi_enable(&ugeth->napi); 3531 netif_start_queue(dev); 3532 3533 device_set_wakeup_capable(&dev->dev, 3534 qe_alive_during_sleep() || ugeth->phydev->irq); 3535 device_set_wakeup_enable(&dev->dev, ugeth->wol_en); 3536 3537 return err; 3538 3539err: 3540 ucc_geth_stop(ugeth); 3541 return err; 3542} 3543 3544/* Stops the kernel queue, and halts the controller */ 3545static int ucc_geth_close(struct net_device *dev) 3546{ 3547 struct ucc_geth_private *ugeth = netdev_priv(dev); 3548 3549 ugeth_vdbg("%s: IN", __func__); 3550 3551 napi_disable(&ugeth->napi); 3552 3553 ucc_geth_stop(ugeth); 3554 3555 free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); 3556 3557 netif_stop_queue(dev); 3558 3559 return 0; 3560} 3561 3562/* Reopen device. This will reset the MAC and PHY. */ 3563static void ucc_geth_timeout_work(struct work_struct *work) 3564{ 3565 struct ucc_geth_private *ugeth; 3566 struct net_device *dev; 3567 3568 ugeth = container_of(work, struct ucc_geth_private, timeout_work); 3569 dev = ugeth->ndev; 3570 3571 ugeth_vdbg("%s: IN", __func__); 3572 3573 dev->stats.tx_errors++; 3574 3575 ugeth_dump_regs(ugeth); 3576 3577 if (dev->flags & IFF_UP) { 3578 /* 3579 * Must reset MAC *and* PHY. This is done by reopening 3580 * the device. 3581 */ 3582 ucc_geth_close(dev); 3583 ucc_geth_open(dev); 3584 } 3585 3586 netif_tx_schedule_all(dev); 3587} 3588 3589/* 3590 * ucc_geth_timeout gets called when a packet has not been 3591 * transmitted after a set amount of time. 3592 */ 3593static void ucc_geth_timeout(struct net_device *dev) 3594{ 3595 struct ucc_geth_private *ugeth = netdev_priv(dev); 3596 3597 netif_carrier_off(dev); 3598 schedule_work(&ugeth->timeout_work); 3599} 3600 3601 3602#ifdef CONFIG_PM 3603 3604static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state) 3605{ 3606 struct net_device *ndev = dev_get_drvdata(&ofdev->dev); 3607 struct ucc_geth_private *ugeth = netdev_priv(ndev); 3608 3609 if (!netif_running(ndev)) 3610 return 0; 3611 3612 netif_device_detach(ndev); 3613 napi_disable(&ugeth->napi); 3614 3615 /* 3616 * Disable the controller, otherwise we'll wakeup on any network 3617 * activity. 
	/*
	 * Disable the controller, otherwise we'll wakeup on any network
	 * activity.
	 */
	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);

	if (ugeth->wol_en & WAKE_MAGIC) {
		setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
		setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
		ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX);
	} else if (!(ugeth->wol_en & WAKE_PHY)) {
		phy_stop(ugeth->phydev);
	}

	return 0;
}

static int ucc_geth_resume(struct platform_device *ofdev)
{
	struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
	struct ucc_geth_private *ugeth = netdev_priv(ndev);
	int err;

	if (!netif_running(ndev))
		return 0;

	if (qe_alive_during_sleep()) {
		if (ugeth->wol_en & WAKE_MAGIC) {
			ucc_fast_disable(ugeth->uccf, COMM_DIR_RX_AND_TX);
			clrbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
			clrbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
		}
		ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
	} else {
		/*
		 * Full reinitialization is required if QE shuts down
		 * during sleep.
		 */
		ucc_geth_memclean(ugeth);

		err = ucc_geth_init_mac(ugeth);
		if (err) {
			ugeth_err("%s: Cannot initialize MAC, aborting.",
				  ndev->name);
			return err;
		}
	}

	ugeth->oldlink = 0;
	ugeth->oldspeed = 0;
	ugeth->oldduplex = -1;

	phy_stop(ugeth->phydev);
	phy_start(ugeth->phydev);

	napi_enable(&ugeth->napi);
	netif_device_attach(ndev);

	return 0;
}

#else
#define ucc_geth_suspend NULL
#define ucc_geth_resume NULL
#endif

static phy_interface_t to_phy_interface(const char *phy_connection_type)
{
	if (strcasecmp(phy_connection_type, "mii") == 0)
		return PHY_INTERFACE_MODE_MII;
	if (strcasecmp(phy_connection_type, "gmii") == 0)
		return PHY_INTERFACE_MODE_GMII;
	if (strcasecmp(phy_connection_type, "tbi") == 0)
		return PHY_INTERFACE_MODE_TBI;
	if (strcasecmp(phy_connection_type, "rmii") == 0)
		return PHY_INTERFACE_MODE_RMII;
	if (strcasecmp(phy_connection_type, "rgmii") == 0)
		return PHY_INTERFACE_MODE_RGMII;
	if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
		return PHY_INTERFACE_MODE_RGMII_ID;
	if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
		return PHY_INTERFACE_MODE_RGMII_TXID;
	if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
		return PHY_INTERFACE_MODE_RGMII_RXID;
	if (strcasecmp(phy_connection_type, "rtbi") == 0)
		return PHY_INTERFACE_MODE_RTBI;
	if (strcasecmp(phy_connection_type, "sgmii") == 0)
		return PHY_INTERFACE_MODE_SGMII;

	return PHY_INTERFACE_MODE_MII;
}

static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!ugeth->phydev)
		return -ENODEV;

	return phy_mii_ioctl(ugeth->phydev, rq, cmd);
}

static const struct net_device_ops ucc_geth_netdev_ops = {
	.ndo_open		= ucc_geth_open,
	.ndo_stop		= ucc_geth_close,
	.ndo_start_xmit		= ucc_geth_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ucc_geth_set_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_multicast_list	= ucc_geth_set_multi,
	.ndo_tx_timeout		= ucc_geth_timeout,
	.ndo_do_ioctl		= ucc_geth_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ucc_netpoll,
#endif
};

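/*
 * Probe: binds one UCC to a net_device. The device tree node supplies
 * "cell-index" (or the legacy "device-id"), the RX/TX clocks via
 * "rx-clock-name"/"tx-clock-name" (or the deprecated "rx-clock"/"tx-clock"),
 * the register window and interrupt, "phy-handle", "tbi-handle" (needed for
 * SGMII) and "phy-connection-type"; the MAC address is taken from the node
 * through of_get_mac_address().
 */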
static int ucc_geth_probe(struct platform_device *ofdev,
			  const struct of_device_id *match)
{
	struct device *device = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct net_device *dev = NULL;
	struct ucc_geth_private *ugeth = NULL;
	struct ucc_geth_info *ug_info;
	struct resource res;
	int err, ucc_num, max_speed = 0;
	const unsigned int *prop;
	const char *sprop;
	const void *mac_addr;
	phy_interface_t phy_interface;
	static const int enet_to_speed[] = {
		SPEED_10, SPEED_10, SPEED_10,
		SPEED_100, SPEED_100, SPEED_100,
		SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
	};
	static const phy_interface_t enet_to_phy_interface[] = {
		PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
		PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
		PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
		PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
		PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
		PHY_INTERFACE_MODE_SGMII,
	};

	ugeth_vdbg("%s: IN", __func__);

	prop = of_get_property(np, "cell-index", NULL);
	if (!prop) {
		prop = of_get_property(np, "device-id", NULL);
		if (!prop)
			return -ENODEV;
	}

	ucc_num = *prop - 1;
	if ((ucc_num < 0) || (ucc_num > 7))
		return -ENODEV;

	ug_info = &ugeth_info[ucc_num];
	if (ug_info == NULL) {
		if (netif_msg_probe(&debug))
			ugeth_err("%s: [%d] Missing additional data!",
				  __func__, ucc_num);
		return -ENODEV;
	}

	ug_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ug_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ug_info->uf_info.rx_clock > QE_CLK24)) {
			printk(KERN_ERR
				"ucc_geth: invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		prop = of_get_property(np, "rx-clock", NULL);
		if (!prop) {
			/* If both rx-clock-name and rx-clock are missing,
			   we want to tell people to use rx-clock-name. */
			printk(KERN_ERR
				"ucc_geth: missing rx-clock-name property\n");
			return -EINVAL;
		}
		if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
			printk(KERN_ERR
				"ucc_geth: invalid rx-clock property\n");
			return -EINVAL;
		}
		ug_info->uf_info.rx_clock = *prop;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ug_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ug_info->uf_info.tx_clock > QE_CLK24)) {
			printk(KERN_ERR
				"ucc_geth: invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		prop = of_get_property(np, "tx-clock", NULL);
		if (!prop) {
			printk(KERN_ERR
				"ucc_geth: missing tx-clock-name property\n");
			return -EINVAL;
		}
		if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
			printk(KERN_ERR
				"ucc_geth: invalid tx-clock property\n");
			return -EINVAL;
		}
		ug_info->uf_info.tx_clock = *prop;
	}

	err = of_address_to_resource(np, 0, &res);
	if (err)
		return -EINVAL;

	ug_info->uf_info.regs = res.start;
	ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY node.  If it's not there, we don't support SGMII */
	ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	/* get the phy interface type, or default to MII */
	prop = of_get_property(np, "phy-connection-type", NULL);
	if (!prop) {
		/* handle interface property present in old trees */
		prop = of_get_property(ug_info->phy_node, "interface", NULL);
		if (prop != NULL) {
			phy_interface = enet_to_phy_interface[*prop];
			max_speed = enet_to_speed[*prop];
		} else
			phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		phy_interface = to_phy_interface((const char *)prop);
	}

	/* get speed, or derive from PHY interface */
	if (max_speed == 0)
		switch (phy_interface) {
		case PHY_INTERFACE_MODE_GMII:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_TBI:
		case PHY_INTERFACE_MODE_RTBI:
		case PHY_INTERFACE_MODE_SGMII:
			max_speed = SPEED_1000;
			break;
		default:
			max_speed = SPEED_100;
			break;
		}

	if (max_speed == SPEED_1000) {
		/* configure muram FIFOs for gigabit operation */
		ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
		ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
		ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
		ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT;
		ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
		ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
		ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;

		/* If QE's snum number is 46 which means we need to support
		 * 4 UECs at 1000Base-T simultaneously, we need to allocate
		 * more Threads to Rx.
		 */
		if (qe_get_num_of_snums() == 46)
			ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6;
		else
			ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
	}

	if (netif_msg_probe(&debug))
		printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
			ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
			ug_info->uf_info.irq);

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof(*ugeth));

	if (dev == NULL)
		return -ENOMEM;

	ugeth = netdev_priv(dev);
	spin_lock_init(&ugeth->lock);

	/* Create CQs for hash tables */
	INIT_LIST_HEAD(&ugeth->group_hash_q);
	INIT_LIST_HEAD(&ugeth->ind_hash_q);

	dev_set_drvdata(device, dev);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long)(ug_info->uf_info.regs);

	SET_NETDEV_DEV(dev, device);

	/* Fill in the dev structure */
	uec_set_ethtool_ops(dev);
	dev->netdev_ops = &ucc_geth_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
	netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
	dev->mtu = 1500;

	ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
	ugeth->phy_interface = phy_interface;
	ugeth->max_speed = max_speed;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, 6);

	ugeth->ug_info = ug_info;
	ugeth->dev = device;
	ugeth->ndev = dev;
	ugeth->node = np;

	/* Register the netdev last, once the private data is fully set up;
	 * the device can be opened as soon as register_netdev() returns. */
	err = register_netdev(dev);
	if (err) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: Cannot register net device, aborting.",
				  dev->name);
		free_netdev(dev);
		return err;
	}

	return 0;
}

static int ucc_geth_remove(struct platform_device *ofdev)
{
	struct device *device = &ofdev->dev;
	struct net_device *dev = dev_get_drvdata(device);
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	unregister_netdev(dev);
	/* Release driver resources before the netdev (and its private
	 * area) is freed. */
	ucc_geth_memclean(ugeth);
	free_netdev(dev);
	dev_set_drvdata(device, NULL);

	return 0;
}

static struct of_device_id ucc_geth_match[] = {
	{
		.type = "network",
		.compatible = "ucc_geth",
	},
	{},
};

MODULE_DEVICE_TABLE(of, ucc_geth_match);

static struct of_platform_driver ucc_geth_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = ucc_geth_match,
	},
	.probe		= ucc_geth_probe,
	.remove		= ucc_geth_remove,
	.suspend	= ucc_geth_suspend,
	.resume		= ucc_geth_resume,
};

static int __init ucc_geth_init(void)
{
	int i, ret;

	if (netif_msg_drv(&debug))
		printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
	for (i = 0; i < 8; i++)
		memcpy(&(ugeth_info[i]), &ugeth_primary_info,
		       sizeof(ugeth_primary_info));

	ret = of_register_platform_driver(&ucc_geth_driver);

	return ret;
}

static void __exit ucc_geth_exit(void)
{
	of_unregister_platform_driver(&ucc_geth_driver);
}

module_init(ucc_geth_init);
module_exit(ucc_geth_exit);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");