1/*
2 * Copyright (C) 2006-2007 Freescale Semicondutor, Inc. All rights reserved.
3 *
4 * Author: Shlomi Gridish <gridish@freescale.com>
5 *	   Li Yang <leoli@freescale.com>
6 *
7 * Description:
8 * QE UCC Gigabit Ethernet Driver
9 *
10 * This program is free software; you can redistribute  it and/or modify it
11 * under  the terms of  the GNU General  Public License as published by the
12 * Free Software Foundation;  either version 2 of the  License, or (at your
13 * option) any later version.
14 */
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/errno.h>
18#include <linux/slab.h>
19#include <linux/stddef.h>
20#include <linux/interrupt.h>
21#include <linux/netdevice.h>
22#include <linux/etherdevice.h>
23#include <linux/skbuff.h>
24#include <linux/spinlock.h>
25#include <linux/mm.h>
26#include <linux/dma-mapping.h>
27#include <linux/fsl_devices.h>
28#include <linux/mii.h>
29#include <linux/phy.h>
30#include <linux/workqueue.h>
31
32#include <asm/of_platform.h>
33#include <asm/uaccess.h>
34#include <asm/irq.h>
35#include <asm/io.h>
36#include <asm/immap_qe.h>
37#include <asm/qe.h>
38#include <asm/ucc.h>
39#include <asm/ucc_fast.h>
40
41#include "ucc_geth.h"
42#include "ucc_geth_mii.h"
43
44#undef DEBUG
45
46#define DRV_DESC "QE UCC Gigabit Ethernet Controller"
47#define DRV_NAME "ucc_geth"
48#define DRV_VERSION "1.1"
49
50#define ugeth_printk(level, format, arg...)  \
51        printk(level format "\n", ## arg)
52
53#define ugeth_dbg(format, arg...)            \
54        ugeth_printk(KERN_DEBUG , format , ## arg)
55#define ugeth_err(format, arg...)            \
56        ugeth_printk(KERN_ERR , format , ## arg)
57#define ugeth_info(format, arg...)           \
58        ugeth_printk(KERN_INFO , format , ## arg)
59#define ugeth_warn(format, arg...)           \
60        ugeth_printk(KERN_WARNING , format , ## arg)
61
62#ifdef UGETH_VERBOSE_DEBUG
63#define ugeth_vdbg ugeth_dbg
64#else
65#define ugeth_vdbg(fmt, args...) do { } while (0)
66#endif				/* UGETH_VERBOSE_DEBUG */
67
68static DEFINE_SPINLOCK(ugeth_lock);
69
70static struct ucc_geth_info ugeth_primary_info = {
71	.uf_info = {
72		    .bd_mem_part = MEM_PART_SYSTEM,
73		    .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
74		    .max_rx_buf_length = 1536,
75		    /* adjusted at startup if max-speed 1000 */
76		    .urfs = UCC_GETH_URFS_INIT,
77		    .urfet = UCC_GETH_URFET_INIT,
78		    .urfset = UCC_GETH_URFSET_INIT,
79		    .utfs = UCC_GETH_UTFS_INIT,
80		    .utfet = UCC_GETH_UTFET_INIT,
81		    .utftt = UCC_GETH_UTFTT_INIT,
82		    .ufpt = 256,
83		    .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
84		    .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
85		    .tenc = UCC_FAST_TX_ENCODING_NRZ,
86		    .renc = UCC_FAST_RX_ENCODING_NRZ,
87		    .tcrc = UCC_FAST_16_BIT_CRC,
88		    .synl = UCC_FAST_SYNC_LEN_NOT_USED,
89		    },
90	.numQueuesTx = 1,
91	.numQueuesRx = 1,
92	.extendedFilteringChainPointer = ((uint32_t) NULL),
93	.typeorlen = 3072 /*1536 */ ,
94	.nonBackToBackIfgPart1 = 0x40,
95	.nonBackToBackIfgPart2 = 0x60,
96	.miminumInterFrameGapEnforcement = 0x50,
97	.backToBackInterFrameGap = 0x60,
98	.mblinterval = 128,
99	.nortsrbytetime = 5,
100	.fracsiz = 1,
101	.strictpriorityq = 0xff,
102	.altBebTruncation = 0xa,
103	.excessDefer = 1,
104	.maxRetransmission = 0xf,
105	.collisionWindow = 0x37,
106	.receiveFlowControl = 1,
107	.maxGroupAddrInHash = 4,
108	.maxIndAddrInHash = 4,
109	.prel = 7,
110	.maxFrameLength = 1518,
111	.minFrameLength = 64,
112	.maxD1Length = 1520,
113	.maxD2Length = 1520,
114	.vlantype = 0x8100,
115	.ecamptr = ((uint32_t) NULL),
116	.eventRegMask = UCCE_OTHER,
117	.pausePeriod = 0xf000,
118	.interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
119	.bdRingLenTx = {
120			TX_BD_RING_LEN,
121			TX_BD_RING_LEN,
122			TX_BD_RING_LEN,
123			TX_BD_RING_LEN,
124			TX_BD_RING_LEN,
125			TX_BD_RING_LEN,
126			TX_BD_RING_LEN,
127			TX_BD_RING_LEN},
128
129	.bdRingLenRx = {
130			RX_BD_RING_LEN,
131			RX_BD_RING_LEN,
132			RX_BD_RING_LEN,
133			RX_BD_RING_LEN,
134			RX_BD_RING_LEN,
135			RX_BD_RING_LEN,
136			RX_BD_RING_LEN,
137			RX_BD_RING_LEN},
138
139	.numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
140	.largestexternallookupkeysize =
141	    QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
142	.statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE,
143	.vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
144	.vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
145	.rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
146	.aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
147	.padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
148	.numThreadsTx = UCC_GETH_NUM_OF_THREADS_4,
149	.numThreadsRx = UCC_GETH_NUM_OF_THREADS_4,
150	.riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
151	.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
152};
153
154static struct ucc_geth_info ugeth_info[8];
155
156#ifdef DEBUG
157static void mem_disp(u8 *addr, int size)
158{
159	u8 *i;
160	int size16Aling = (size >> 4) << 4;
161	int size4Aling = (size >> 2) << 2;
162	int notAlign = 0;
163	if (size % 16)
164		notAlign = 1;
165
166	for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
167		printk("0x%08x: %08x %08x %08x %08x\r\n",
168		       (u32) i,
169		       *((u32 *) (i)),
170		       *((u32 *) (i + 4)),
171		       *((u32 *) (i + 8)), *((u32 *) (i + 12)));
172	if (notAlign == 1)
173		printk("0x%08x: ", (u32) i);
174	for (; (u32) i < (u32) addr + size4Aling; i += 4)
175		printk("%08x ", *((u32 *) (i)));
176	for (; (u32) i < (u32) addr + size; i++)
177		printk("%02x", *((u8 *) (i)));
178	if (notAlign == 1)
179		printk("\r\n");
180}
181#endif /* DEBUG */
182
183#ifdef CONFIG_UGETH_FILTERING
184static void enqueue(struct list_head *node, struct list_head *lh)
185{
186	unsigned long flags;
187
188	spin_lock_irqsave(&ugeth_lock, flags);
189	list_add_tail(node, lh);
190	spin_unlock_irqrestore(&ugeth_lock, flags);
191}
192#endif /* CONFIG_UGETH_FILTERING */
193
194static struct list_head *dequeue(struct list_head *lh)
195{
196	unsigned long flags;
197
198	spin_lock_irqsave(&ugeth_lock, flags);
199	if (!list_empty(lh)) {
200		struct list_head *node = lh->next;
201		list_del(node);
202		spin_unlock_irqrestore(&ugeth_lock, flags);
203		return node;
204	} else {
205		spin_unlock_irqrestore(&ugeth_lock, flags);
206		return NULL;
207	}
208}
209
210static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 *bd)
211{
212	struct sk_buff *skb = NULL;
213
214	skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
215				  UCC_GETH_RX_DATA_BUF_ALIGNMENT);
216
217	if (skb == NULL)
218		return NULL;
219
220	/* We need the data buffer to be aligned properly.  We will reserve
221	 * as many bytes as needed to align the data properly
222	 */
223	skb_reserve(skb,
224		    UCC_GETH_RX_DATA_BUF_ALIGNMENT -
225		    (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
226					      1)));
227
228	skb->dev = ugeth->dev;
229
230	out_be32(&((struct qe_bd *)bd)->buf,
231		      dma_map_single(NULL,
232				     skb->data,
233				     ugeth->ug_info->uf_info.max_rx_buf_length +
234				     UCC_GETH_RX_DATA_BUF_ALIGNMENT,
235				     DMA_FROM_DEVICE));
236
237	out_be32((u32 *)bd, (R_E | R_I | (in_be32((u32 *)bd) & R_W)));
238
239	return skb;
240}
241
242static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
243{
244	u8 *bd;
245	u32 bd_status;
246	struct sk_buff *skb;
247	int i;
248
249	bd = ugeth->p_rx_bd_ring[rxQ];
250	i = 0;
251
252	do {
253		bd_status = in_be32((u32*)bd);
254		skb = get_new_skb(ugeth, bd);
255
256		if (!skb)	/* If can not allocate data buffer,
257				abort. Cleanup will be elsewhere */
258			return -ENOMEM;
259
260		ugeth->rx_skbuff[rxQ][i] = skb;
261
262		/* advance the BD pointer */
263		bd += sizeof(struct qe_bd);
264		i++;
265	} while (!(bd_status & R_W));
266
267	return 0;
268}
269
270static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
271				  volatile u32 *p_start,
272				  u8 num_entries,
273				  u32 thread_size,
274				  u32 thread_alignment,
275				  enum qe_risc_allocation risc,
276				  int skip_page_for_first_entry)
277{
278	u32 init_enet_offset;
279	u8 i;
280	int snum;
281
282	for (i = 0; i < num_entries; i++) {
283		if ((snum = qe_get_snum()) < 0) {
284			ugeth_err("fill_init_enet_entries: Can not get SNUM.");
285			return snum;
286		}
287		if ((i == 0) && skip_page_for_first_entry)
288		/* First entry of Rx does not have page */
289			init_enet_offset = 0;
290		else {
291			init_enet_offset =
292			    qe_muram_alloc(thread_size, thread_alignment);
293			if (IS_ERR_VALUE(init_enet_offset)) {
294				ugeth_err
295		("fill_init_enet_entries: Can not allocate DPRAM memory.");
296				qe_put_snum((u8) snum);
297				return -ENOMEM;
298			}
299		}
300		*(p_start++) =
301		    ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
302		    | risc;
303	}
304
305	return 0;
306}
307
308static int return_init_enet_entries(struct ucc_geth_private *ugeth,
309				    volatile u32 *p_start,
310				    u8 num_entries,
311				    enum qe_risc_allocation risc,
312				    int skip_page_for_first_entry)
313{
314	u32 init_enet_offset;
315	u8 i;
316	int snum;
317
318	for (i = 0; i < num_entries; i++) {
319		/* Check that this entry was actually valid --
320		needed in case failed in allocations */
321		if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
322			snum =
323			    (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
324			    ENET_INIT_PARAM_SNUM_SHIFT;
325			qe_put_snum((u8) snum);
326			if (!((i == 0) && skip_page_for_first_entry)) {
327			/* First entry of Rx does not have page */
328				init_enet_offset =
329				    (in_be32(p_start) &
330				     ENET_INIT_PARAM_PTR_MASK);
331				qe_muram_free(init_enet_offset);
332			}
333			*(p_start++) = 0;	/* Just for cosmetics */
334		}
335	}
336
337	return 0;
338}
339
340#ifdef DEBUG
341static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
342				  volatile u32 *p_start,
343				  u8 num_entries,
344				  u32 thread_size,
345				  enum qe_risc_allocation risc,
346				  int skip_page_for_first_entry)
347{
348	u32 init_enet_offset;
349	u8 i;
350	int snum;
351
352	for (i = 0; i < num_entries; i++) {
353		/* Check that this entry was actually valid --
354		needed in case failed in allocations */
355		if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
356			snum =
357			    (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
358			    ENET_INIT_PARAM_SNUM_SHIFT;
359			qe_put_snum((u8) snum);
360			if (!((i == 0) && skip_page_for_first_entry)) {
361			/* First entry of Rx does not have page */
362				init_enet_offset =
363				    (in_be32(p_start) &
364				     ENET_INIT_PARAM_PTR_MASK);
365				ugeth_info("Init enet entry %d:", i);
366				ugeth_info("Base address: 0x%08x",
367					   (u32)
368					   qe_muram_addr(init_enet_offset));
369				mem_disp(qe_muram_addr(init_enet_offset),
370					 thread_size);
371			}
372			p_start++;
373		}
374	}
375
376	return 0;
377}
378#endif
379
380#ifdef CONFIG_UGETH_FILTERING
381static struct enet_addr_container *get_enet_addr_container(void)
382{
383	struct enet_addr_container *enet_addr_cont;
384
385	/* allocate memory */
386	enet_addr_cont = kmalloc(sizeof(struct enet_addr_container), GFP_KERNEL);
387	if (!enet_addr_cont) {
388		ugeth_err("%s: No memory for enet_addr_container object.",
389			  __FUNCTION__);
390		return NULL;
391	}
392
393	return enet_addr_cont;
394}
395#endif /* CONFIG_UGETH_FILTERING */
396
397static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
398{
399	kfree(enet_addr_cont);
400}
401
402static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
403{
404	out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]);
405	out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]);
406	out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
407}
408
409#ifdef CONFIG_UGETH_FILTERING
410static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth,
411                                u8 *p_enet_addr, u8 paddr_num)
412{
413	struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
414
415	if (!(paddr_num < NUM_OF_PADDRS)) {
416		ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
417		return -EINVAL;
418	}
419
420	p_82xx_addr_filt =
421	    (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
422	    addressfiltering;
423
424	/* Ethernet frames are defined in Little Endian mode,    */
425	/* therefore to insert the address we reverse the bytes. */
426	set_mac_addr(&p_82xx_addr_filt->paddr[paddr_num].h, p_enet_addr);
427	return 0;
428}
429#endif /* CONFIG_UGETH_FILTERING */
430
431static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
432{
433	struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
434
435	if (!(paddr_num < NUM_OF_PADDRS)) {
436		ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__);
437		return -EINVAL;
438	}
439
440	p_82xx_addr_filt =
441	    (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
442	    addressfiltering;
443
444	/* Writing address ff.ff.ff.ff.ff.ff disables address
445	recognition for this register */
446	out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
447	out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
448	out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);
449
450	return 0;
451}
452
453static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
454                                u8 *p_enet_addr)
455{
456	struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
457	u32 cecr_subblock;
458
459	p_82xx_addr_filt =
460	    (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
461	    addressfiltering;
462
463	cecr_subblock =
464	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
465
466	/* Ethernet frames are defined in Little Endian mode,
467	therefor to insert */
468	/* the address to the hash (Big Endian mode), we reverse the bytes.*/
469
470	set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);
471
472	qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
473		     QE_CR_PROTOCOL_ETHERNET, 0);
474}
475
476#ifdef CONFIG_UGETH_MAGIC_PACKET
477static void magic_packet_detection_enable(struct ucc_geth_private *ugeth)
478{
479	struct ucc_fast_private *uccf;
480	struct ucc_geth *ug_regs;
481	u32 maccfg2, uccm;
482
483	uccf = ugeth->uccf;
484	ug_regs = ugeth->ug_regs;
485
486	/* Enable interrupts for magic packet detection */
487	uccm = in_be32(uccf->p_uccm);
488	uccm |= UCCE_MPD;
489	out_be32(uccf->p_uccm, uccm);
490
491	/* Enable magic packet detection */
492	maccfg2 = in_be32(&ug_regs->maccfg2);
493	maccfg2 |= MACCFG2_MPE;
494	out_be32(&ug_regs->maccfg2, maccfg2);
495}
496
497static void magic_packet_detection_disable(struct ucc_geth_private *ugeth)
498{
499	struct ucc_fast_private *uccf;
500	struct ucc_geth *ug_regs;
501	u32 maccfg2, uccm;
502
503	uccf = ugeth->uccf;
504	ug_regs = ugeth->ug_regs;
505
506	/* Disable interrupts for magic packet detection */
507	uccm = in_be32(uccf->p_uccm);
508	uccm &= ~UCCE_MPD;
509	out_be32(uccf->p_uccm, uccm);
510
511	/* Disable magic packet detection */
512	maccfg2 = in_be32(&ug_regs->maccfg2);
513	maccfg2 &= ~MACCFG2_MPE;
514	out_be32(&ug_regs->maccfg2, maccfg2);
515}
516#endif /* MAGIC_PACKET */
517
518static inline int compare_addr(u8 **addr1, u8 **addr2)
519{
520	return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
521}
522
523#ifdef DEBUG
524static void get_statistics(struct ucc_geth_private *ugeth,
525			   struct ucc_geth_tx_firmware_statistics *
526			   tx_firmware_statistics,
527			   struct ucc_geth_rx_firmware_statistics *
528			   rx_firmware_statistics,
529			   struct ucc_geth_hardware_statistics *hardware_statistics)
530{
531	struct ucc_fast *uf_regs;
532	struct ucc_geth *ug_regs;
533	struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
534	struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
535
536	ug_regs = ugeth->ug_regs;
537	uf_regs = (struct ucc_fast *) ug_regs;
538	p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
539	p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
540
541	/* Tx firmware only if user handed pointer and driver actually
542	gathers Tx firmware statistics */
543	if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
544		tx_firmware_statistics->sicoltx =
545		    in_be32(&p_tx_fw_statistics_pram->sicoltx);
546		tx_firmware_statistics->mulcoltx =
547		    in_be32(&p_tx_fw_statistics_pram->mulcoltx);
548		tx_firmware_statistics->latecoltxfr =
549		    in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
550		tx_firmware_statistics->frabortduecol =
551		    in_be32(&p_tx_fw_statistics_pram->frabortduecol);
552		tx_firmware_statistics->frlostinmactxer =
553		    in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
554		tx_firmware_statistics->carriersenseertx =
555		    in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
556		tx_firmware_statistics->frtxok =
557		    in_be32(&p_tx_fw_statistics_pram->frtxok);
558		tx_firmware_statistics->txfrexcessivedefer =
559		    in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
560		tx_firmware_statistics->txpkts256 =
561		    in_be32(&p_tx_fw_statistics_pram->txpkts256);
562		tx_firmware_statistics->txpkts512 =
563		    in_be32(&p_tx_fw_statistics_pram->txpkts512);
564		tx_firmware_statistics->txpkts1024 =
565		    in_be32(&p_tx_fw_statistics_pram->txpkts1024);
566		tx_firmware_statistics->txpktsjumbo =
567		    in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
568	}
569
570	/* Rx firmware only if user handed pointer and driver actually
571	 * gathers Rx firmware statistics */
572	if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
573		int i;
574		rx_firmware_statistics->frrxfcser =
575		    in_be32(&p_rx_fw_statistics_pram->frrxfcser);
576		rx_firmware_statistics->fraligner =
577		    in_be32(&p_rx_fw_statistics_pram->fraligner);
578		rx_firmware_statistics->inrangelenrxer =
579		    in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
580		rx_firmware_statistics->outrangelenrxer =
581		    in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
582		rx_firmware_statistics->frtoolong =
583		    in_be32(&p_rx_fw_statistics_pram->frtoolong);
584		rx_firmware_statistics->runt =
585		    in_be32(&p_rx_fw_statistics_pram->runt);
586		rx_firmware_statistics->verylongevent =
587		    in_be32(&p_rx_fw_statistics_pram->verylongevent);
588		rx_firmware_statistics->symbolerror =
589		    in_be32(&p_rx_fw_statistics_pram->symbolerror);
590		rx_firmware_statistics->dropbsy =
591		    in_be32(&p_rx_fw_statistics_pram->dropbsy);
592		for (i = 0; i < 0x8; i++)
593			rx_firmware_statistics->res0[i] =
594			    p_rx_fw_statistics_pram->res0[i];
595		rx_firmware_statistics->mismatchdrop =
596		    in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
597		rx_firmware_statistics->underpkts =
598		    in_be32(&p_rx_fw_statistics_pram->underpkts);
599		rx_firmware_statistics->pkts256 =
600		    in_be32(&p_rx_fw_statistics_pram->pkts256);
601		rx_firmware_statistics->pkts512 =
602		    in_be32(&p_rx_fw_statistics_pram->pkts512);
603		rx_firmware_statistics->pkts1024 =
604		    in_be32(&p_rx_fw_statistics_pram->pkts1024);
605		rx_firmware_statistics->pktsjumbo =
606		    in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
607		rx_firmware_statistics->frlossinmacer =
608		    in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
609		rx_firmware_statistics->pausefr =
610		    in_be32(&p_rx_fw_statistics_pram->pausefr);
611		for (i = 0; i < 0x4; i++)
612			rx_firmware_statistics->res1[i] =
613			    p_rx_fw_statistics_pram->res1[i];
614		rx_firmware_statistics->removevlan =
615		    in_be32(&p_rx_fw_statistics_pram->removevlan);
616		rx_firmware_statistics->replacevlan =
617		    in_be32(&p_rx_fw_statistics_pram->replacevlan);
618		rx_firmware_statistics->insertvlan =
619		    in_be32(&p_rx_fw_statistics_pram->insertvlan);
620	}
621
622	/* Hardware only if user handed pointer and driver actually
623	gathers hardware statistics */
624	if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) {
625		hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
626		hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
627		hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
628		hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
629		hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
630		hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
631		hardware_statistics->txok = in_be32(&ug_regs->txok);
632		hardware_statistics->txcf = in_be16(&ug_regs->txcf);
633		hardware_statistics->tmca = in_be32(&ug_regs->tmca);
634		hardware_statistics->tbca = in_be32(&ug_regs->tbca);
635		hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
636		hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
637		hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
638		hardware_statistics->rmca = in_be32(&ug_regs->rmca);
639		hardware_statistics->rbca = in_be32(&ug_regs->rbca);
640	}
641}
642
643static void dump_bds(struct ucc_geth_private *ugeth)
644{
645	int i;
646	int length;
647
648	for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
649		if (ugeth->p_tx_bd_ring[i]) {
650			length =
651			    (ugeth->ug_info->bdRingLenTx[i] *
652			     sizeof(struct qe_bd));
653			ugeth_info("TX BDs[%d]", i);
654			mem_disp(ugeth->p_tx_bd_ring[i], length);
655		}
656	}
657	for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
658		if (ugeth->p_rx_bd_ring[i]) {
659			length =
660			    (ugeth->ug_info->bdRingLenRx[i] *
661			     sizeof(struct qe_bd));
662			ugeth_info("RX BDs[%d]", i);
663			mem_disp(ugeth->p_rx_bd_ring[i], length);
664		}
665	}
666}
667
668static void dump_regs(struct ucc_geth_private *ugeth)
669{
670	int i;
671
672	ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
673	ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);
674
675	ugeth_info("maccfg1    : addr - 0x%08x, val - 0x%08x",
676		   (u32) & ugeth->ug_regs->maccfg1,
677		   in_be32(&ugeth->ug_regs->maccfg1));
678	ugeth_info("maccfg2    : addr - 0x%08x, val - 0x%08x",
679		   (u32) & ugeth->ug_regs->maccfg2,
680		   in_be32(&ugeth->ug_regs->maccfg2));
681	ugeth_info("ipgifg     : addr - 0x%08x, val - 0x%08x",
682		   (u32) & ugeth->ug_regs->ipgifg,
683		   in_be32(&ugeth->ug_regs->ipgifg));
684	ugeth_info("hafdup     : addr - 0x%08x, val - 0x%08x",
685		   (u32) & ugeth->ug_regs->hafdup,
686		   in_be32(&ugeth->ug_regs->hafdup));
687	ugeth_info("ifctl      : addr - 0x%08x, val - 0x%08x",
688		   (u32) & ugeth->ug_regs->ifctl,
689		   in_be32(&ugeth->ug_regs->ifctl));
690	ugeth_info("ifstat     : addr - 0x%08x, val - 0x%08x",
691		   (u32) & ugeth->ug_regs->ifstat,
692		   in_be32(&ugeth->ug_regs->ifstat));
693	ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
694		   (u32) & ugeth->ug_regs->macstnaddr1,
695		   in_be32(&ugeth->ug_regs->macstnaddr1));
696	ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
697		   (u32) & ugeth->ug_regs->macstnaddr2,
698		   in_be32(&ugeth->ug_regs->macstnaddr2));
699	ugeth_info("uempr      : addr - 0x%08x, val - 0x%08x",
700		   (u32) & ugeth->ug_regs->uempr,
701		   in_be32(&ugeth->ug_regs->uempr));
702	ugeth_info("utbipar    : addr - 0x%08x, val - 0x%08x",
703		   (u32) & ugeth->ug_regs->utbipar,
704		   in_be32(&ugeth->ug_regs->utbipar));
705	ugeth_info("uescr      : addr - 0x%08x, val - 0x%04x",
706		   (u32) & ugeth->ug_regs->uescr,
707		   in_be16(&ugeth->ug_regs->uescr));
708	ugeth_info("tx64       : addr - 0x%08x, val - 0x%08x",
709		   (u32) & ugeth->ug_regs->tx64,
710		   in_be32(&ugeth->ug_regs->tx64));
711	ugeth_info("tx127      : addr - 0x%08x, val - 0x%08x",
712		   (u32) & ugeth->ug_regs->tx127,
713		   in_be32(&ugeth->ug_regs->tx127));
714	ugeth_info("tx255      : addr - 0x%08x, val - 0x%08x",
715		   (u32) & ugeth->ug_regs->tx255,
716		   in_be32(&ugeth->ug_regs->tx255));
717	ugeth_info("rx64       : addr - 0x%08x, val - 0x%08x",
718		   (u32) & ugeth->ug_regs->rx64,
719		   in_be32(&ugeth->ug_regs->rx64));
720	ugeth_info("rx127      : addr - 0x%08x, val - 0x%08x",
721		   (u32) & ugeth->ug_regs->rx127,
722		   in_be32(&ugeth->ug_regs->rx127));
723	ugeth_info("rx255      : addr - 0x%08x, val - 0x%08x",
724		   (u32) & ugeth->ug_regs->rx255,
725		   in_be32(&ugeth->ug_regs->rx255));
726	ugeth_info("txok       : addr - 0x%08x, val - 0x%08x",
727		   (u32) & ugeth->ug_regs->txok,
728		   in_be32(&ugeth->ug_regs->txok));
729	ugeth_info("txcf       : addr - 0x%08x, val - 0x%04x",
730		   (u32) & ugeth->ug_regs->txcf,
731		   in_be16(&ugeth->ug_regs->txcf));
732	ugeth_info("tmca       : addr - 0x%08x, val - 0x%08x",
733		   (u32) & ugeth->ug_regs->tmca,
734		   in_be32(&ugeth->ug_regs->tmca));
735	ugeth_info("tbca       : addr - 0x%08x, val - 0x%08x",
736		   (u32) & ugeth->ug_regs->tbca,
737		   in_be32(&ugeth->ug_regs->tbca));
738	ugeth_info("rxfok      : addr - 0x%08x, val - 0x%08x",
739		   (u32) & ugeth->ug_regs->rxfok,
740		   in_be32(&ugeth->ug_regs->rxfok));
741	ugeth_info("rxbok      : addr - 0x%08x, val - 0x%08x",
742		   (u32) & ugeth->ug_regs->rxbok,
743		   in_be32(&ugeth->ug_regs->rxbok));
744	ugeth_info("rbyt       : addr - 0x%08x, val - 0x%08x",
745		   (u32) & ugeth->ug_regs->rbyt,
746		   in_be32(&ugeth->ug_regs->rbyt));
747	ugeth_info("rmca       : addr - 0x%08x, val - 0x%08x",
748		   (u32) & ugeth->ug_regs->rmca,
749		   in_be32(&ugeth->ug_regs->rmca));
750	ugeth_info("rbca       : addr - 0x%08x, val - 0x%08x",
751		   (u32) & ugeth->ug_regs->rbca,
752		   in_be32(&ugeth->ug_regs->rbca));
753	ugeth_info("scar       : addr - 0x%08x, val - 0x%08x",
754		   (u32) & ugeth->ug_regs->scar,
755		   in_be32(&ugeth->ug_regs->scar));
756	ugeth_info("scam       : addr - 0x%08x, val - 0x%08x",
757		   (u32) & ugeth->ug_regs->scam,
758		   in_be32(&ugeth->ug_regs->scam));
759
760	if (ugeth->p_thread_data_tx) {
761		int numThreadsTxNumerical;
762		switch (ugeth->ug_info->numThreadsTx) {
763		case UCC_GETH_NUM_OF_THREADS_1:
764			numThreadsTxNumerical = 1;
765			break;
766		case UCC_GETH_NUM_OF_THREADS_2:
767			numThreadsTxNumerical = 2;
768			break;
769		case UCC_GETH_NUM_OF_THREADS_4:
770			numThreadsTxNumerical = 4;
771			break;
772		case UCC_GETH_NUM_OF_THREADS_6:
773			numThreadsTxNumerical = 6;
774			break;
775		case UCC_GETH_NUM_OF_THREADS_8:
776			numThreadsTxNumerical = 8;
777			break;
778		default:
779			numThreadsTxNumerical = 0;
780			break;
781		}
782
783		ugeth_info("Thread data TXs:");
784		ugeth_info("Base address: 0x%08x",
785			   (u32) ugeth->p_thread_data_tx);
786		for (i = 0; i < numThreadsTxNumerical; i++) {
787			ugeth_info("Thread data TX[%d]:", i);
788			ugeth_info("Base address: 0x%08x",
789				   (u32) & ugeth->p_thread_data_tx[i]);
790			mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
791				 sizeof(struct ucc_geth_thread_data_tx));
792		}
793	}
794	if (ugeth->p_thread_data_rx) {
795		int numThreadsRxNumerical;
796		switch (ugeth->ug_info->numThreadsRx) {
797		case UCC_GETH_NUM_OF_THREADS_1:
798			numThreadsRxNumerical = 1;
799			break;
800		case UCC_GETH_NUM_OF_THREADS_2:
801			numThreadsRxNumerical = 2;
802			break;
803		case UCC_GETH_NUM_OF_THREADS_4:
804			numThreadsRxNumerical = 4;
805			break;
806		case UCC_GETH_NUM_OF_THREADS_6:
807			numThreadsRxNumerical = 6;
808			break;
809		case UCC_GETH_NUM_OF_THREADS_8:
810			numThreadsRxNumerical = 8;
811			break;
812		default:
813			numThreadsRxNumerical = 0;
814			break;
815		}
816
817		ugeth_info("Thread data RX:");
818		ugeth_info("Base address: 0x%08x",
819			   (u32) ugeth->p_thread_data_rx);
820		for (i = 0; i < numThreadsRxNumerical; i++) {
821			ugeth_info("Thread data RX[%d]:", i);
822			ugeth_info("Base address: 0x%08x",
823				   (u32) & ugeth->p_thread_data_rx[i]);
824			mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
825				 sizeof(struct ucc_geth_thread_data_rx));
826		}
827	}
828	if (ugeth->p_exf_glbl_param) {
829		ugeth_info("EXF global param:");
830		ugeth_info("Base address: 0x%08x",
831			   (u32) ugeth->p_exf_glbl_param);
832		mem_disp((u8 *) ugeth->p_exf_glbl_param,
833			 sizeof(*ugeth->p_exf_glbl_param));
834	}
835	if (ugeth->p_tx_glbl_pram) {
836		ugeth_info("TX global param:");
837		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
838		ugeth_info("temoder      : addr - 0x%08x, val - 0x%04x",
839			   (u32) & ugeth->p_tx_glbl_pram->temoder,
840			   in_be16(&ugeth->p_tx_glbl_pram->temoder));
841		ugeth_info("sqptr        : addr - 0x%08x, val - 0x%08x",
842			   (u32) & ugeth->p_tx_glbl_pram->sqptr,
843			   in_be32(&ugeth->p_tx_glbl_pram->sqptr));
844		ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
845			   (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer,
846			   in_be32(&ugeth->p_tx_glbl_pram->
847				   schedulerbasepointer));
848		ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
849			   (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr,
850			   in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
851		ugeth_info("tstate       : addr - 0x%08x, val - 0x%08x",
852			   (u32) & ugeth->p_tx_glbl_pram->tstate,
853			   in_be32(&ugeth->p_tx_glbl_pram->tstate));
854		ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
855			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[0],
856			   ugeth->p_tx_glbl_pram->iphoffset[0]);
857		ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
858			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[1],
859			   ugeth->p_tx_glbl_pram->iphoffset[1]);
860		ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
861			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[2],
862			   ugeth->p_tx_glbl_pram->iphoffset[2]);
863		ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
864			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[3],
865			   ugeth->p_tx_glbl_pram->iphoffset[3]);
866		ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
867			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[4],
868			   ugeth->p_tx_glbl_pram->iphoffset[4]);
869		ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
870			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[5],
871			   ugeth->p_tx_glbl_pram->iphoffset[5]);
872		ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
873			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[6],
874			   ugeth->p_tx_glbl_pram->iphoffset[6]);
875		ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
876			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[7],
877			   ugeth->p_tx_glbl_pram->iphoffset[7]);
878		ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
879			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[0],
880			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
881		ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
882			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[1],
883			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
884		ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
885			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[2],
886			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
887		ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
888			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[3],
889			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
890		ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
891			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[4],
892			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
893		ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
894			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[5],
895			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
896		ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
897			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[6],
898			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
899		ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
900			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[7],
901			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
902		ugeth_info("tqptr        : addr - 0x%08x, val - 0x%08x",
903			   (u32) & ugeth->p_tx_glbl_pram->tqptr,
904			   in_be32(&ugeth->p_tx_glbl_pram->tqptr));
905	}
906	if (ugeth->p_rx_glbl_pram) {
907		ugeth_info("RX global param:");
908		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
909		ugeth_info("remoder         : addr - 0x%08x, val - 0x%08x",
910			   (u32) & ugeth->p_rx_glbl_pram->remoder,
911			   in_be32(&ugeth->p_rx_glbl_pram->remoder));
912		ugeth_info("rqptr           : addr - 0x%08x, val - 0x%08x",
913			   (u32) & ugeth->p_rx_glbl_pram->rqptr,
914			   in_be32(&ugeth->p_rx_glbl_pram->rqptr));
915		ugeth_info("typeorlen       : addr - 0x%08x, val - 0x%04x",
916			   (u32) & ugeth->p_rx_glbl_pram->typeorlen,
917			   in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
918		ugeth_info("rxgstpack       : addr - 0x%08x, val - 0x%02x",
919			   (u32) & ugeth->p_rx_glbl_pram->rxgstpack,
920			   ugeth->p_rx_glbl_pram->rxgstpack);
921		ugeth_info("rxrmonbaseptr   : addr - 0x%08x, val - 0x%08x",
922			   (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr,
923			   in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
924		ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
925			   (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr,
926			   in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
927		ugeth_info("rstate          : addr - 0x%08x, val - 0x%02x",
928			   (u32) & ugeth->p_rx_glbl_pram->rstate,
929			   ugeth->p_rx_glbl_pram->rstate);
930		ugeth_info("mrblr           : addr - 0x%08x, val - 0x%04x",
931			   (u32) & ugeth->p_rx_glbl_pram->mrblr,
932			   in_be16(&ugeth->p_rx_glbl_pram->mrblr));
933		ugeth_info("rbdqptr         : addr - 0x%08x, val - 0x%08x",
934			   (u32) & ugeth->p_rx_glbl_pram->rbdqptr,
935			   in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
936		ugeth_info("mflr            : addr - 0x%08x, val - 0x%04x",
937			   (u32) & ugeth->p_rx_glbl_pram->mflr,
938			   in_be16(&ugeth->p_rx_glbl_pram->mflr));
939		ugeth_info("minflr          : addr - 0x%08x, val - 0x%04x",
940			   (u32) & ugeth->p_rx_glbl_pram->minflr,
941			   in_be16(&ugeth->p_rx_glbl_pram->minflr));
942		ugeth_info("maxd1           : addr - 0x%08x, val - 0x%04x",
943			   (u32) & ugeth->p_rx_glbl_pram->maxd1,
944			   in_be16(&ugeth->p_rx_glbl_pram->maxd1));
945		ugeth_info("maxd2           : addr - 0x%08x, val - 0x%04x",
946			   (u32) & ugeth->p_rx_glbl_pram->maxd2,
947			   in_be16(&ugeth->p_rx_glbl_pram->maxd2));
948		ugeth_info("ecamptr         : addr - 0x%08x, val - 0x%08x",
949			   (u32) & ugeth->p_rx_glbl_pram->ecamptr,
950			   in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
951		ugeth_info("l2qt            : addr - 0x%08x, val - 0x%08x",
952			   (u32) & ugeth->p_rx_glbl_pram->l2qt,
953			   in_be32(&ugeth->p_rx_glbl_pram->l2qt));
954		ugeth_info("l3qt[0]         : addr - 0x%08x, val - 0x%08x",
955			   (u32) & ugeth->p_rx_glbl_pram->l3qt[0],
956			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
957		ugeth_info("l3qt[1]         : addr - 0x%08x, val - 0x%08x",
958			   (u32) & ugeth->p_rx_glbl_pram->l3qt[1],
959			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
960		ugeth_info("l3qt[2]         : addr - 0x%08x, val - 0x%08x",
961			   (u32) & ugeth->p_rx_glbl_pram->l3qt[2],
962			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
963		ugeth_info("l3qt[3]         : addr - 0x%08x, val - 0x%08x",
964			   (u32) & ugeth->p_rx_glbl_pram->l3qt[3],
965			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
966		ugeth_info("l3qt[4]         : addr - 0x%08x, val - 0x%08x",
967			   (u32) & ugeth->p_rx_glbl_pram->l3qt[4],
968			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
969		ugeth_info("l3qt[5]         : addr - 0x%08x, val - 0x%08x",
970			   (u32) & ugeth->p_rx_glbl_pram->l3qt[5],
971			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
972		ugeth_info("l3qt[6]         : addr - 0x%08x, val - 0x%08x",
973			   (u32) & ugeth->p_rx_glbl_pram->l3qt[6],
974			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
975		ugeth_info("l3qt[7]         : addr - 0x%08x, val - 0x%08x",
976			   (u32) & ugeth->p_rx_glbl_pram->l3qt[7],
977			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
978		ugeth_info("vlantype        : addr - 0x%08x, val - 0x%04x",
979			   (u32) & ugeth->p_rx_glbl_pram->vlantype,
980			   in_be16(&ugeth->p_rx_glbl_pram->vlantype));
981		ugeth_info("vlantci         : addr - 0x%08x, val - 0x%04x",
982			   (u32) & ugeth->p_rx_glbl_pram->vlantci,
983			   in_be16(&ugeth->p_rx_glbl_pram->vlantci));
984		for (i = 0; i < 64; i++)
985			ugeth_info
986		    ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
987			     i,
988			     (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i],
989			     ugeth->p_rx_glbl_pram->addressfiltering[i]);
990		ugeth_info("exfGlobalParam  : addr - 0x%08x, val - 0x%08x",
991			   (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam,
992			   in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
993	}
994	if (ugeth->p_send_q_mem_reg) {
995		ugeth_info("Send Q memory registers:");
996		ugeth_info("Base address: 0x%08x",
997			   (u32) ugeth->p_send_q_mem_reg);
998		for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
999			ugeth_info("SQQD[%d]:", i);
1000			ugeth_info("Base address: 0x%08x",
1001				   (u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
1002			mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
1003				 sizeof(struct ucc_geth_send_queue_qd));
1004		}
1005	}
1006	if (ugeth->p_scheduler) {
1007		ugeth_info("Scheduler:");
1008		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
1009		mem_disp((u8 *) ugeth->p_scheduler,
1010			 sizeof(*ugeth->p_scheduler));
1011	}
1012	if (ugeth->p_tx_fw_statistics_pram) {
1013		ugeth_info("TX FW statistics pram:");
1014		ugeth_info("Base address: 0x%08x",
1015			   (u32) ugeth->p_tx_fw_statistics_pram);
1016		mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
1017			 sizeof(*ugeth->p_tx_fw_statistics_pram));
1018	}
1019	if (ugeth->p_rx_fw_statistics_pram) {
1020		ugeth_info("RX FW statistics pram:");
1021		ugeth_info("Base address: 0x%08x",
1022			   (u32) ugeth->p_rx_fw_statistics_pram);
1023		mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
1024			 sizeof(*ugeth->p_rx_fw_statistics_pram));
1025	}
1026	if (ugeth->p_rx_irq_coalescing_tbl) {
1027		ugeth_info("RX IRQ coalescing tables:");
1028		ugeth_info("Base address: 0x%08x",
1029			   (u32) ugeth->p_rx_irq_coalescing_tbl);
1030		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
1031			ugeth_info("RX IRQ coalescing table entry[%d]:", i);
1032			ugeth_info("Base address: 0x%08x",
1033				   (u32) & ugeth->p_rx_irq_coalescing_tbl->
1034				   coalescingentry[i]);
1035			ugeth_info
1036		("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
1037			     (u32) & ugeth->p_rx_irq_coalescing_tbl->
1038			     coalescingentry[i].interruptcoalescingmaxvalue,
1039			     in_be32(&ugeth->p_rx_irq_coalescing_tbl->
1040				     coalescingentry[i].
1041				     interruptcoalescingmaxvalue));
1042			ugeth_info
1043		("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
1044			     (u32) & ugeth->p_rx_irq_coalescing_tbl->
1045			     coalescingentry[i].interruptcoalescingcounter,
1046			     in_be32(&ugeth->p_rx_irq_coalescing_tbl->
1047				     coalescingentry[i].
1048				     interruptcoalescingcounter));
1049		}
1050	}
1051	if (ugeth->p_rx_bd_qs_tbl) {
1052		ugeth_info("RX BD QS tables:");
1053		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
1054		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
1055			ugeth_info("RX BD QS table[%d]:", i);
1056			ugeth_info("Base address: 0x%08x",
1057				   (u32) & ugeth->p_rx_bd_qs_tbl[i]);
1058			ugeth_info
1059			    ("bdbaseptr        : addr - 0x%08x, val - 0x%08x",
1060			     (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
1061			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
1062			ugeth_info
1063			    ("bdptr            : addr - 0x%08x, val - 0x%08x",
1064			     (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr,
1065			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
1066			ugeth_info
1067			    ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
1068			     (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
1069			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].
1070				     externalbdbaseptr));
1071			ugeth_info
1072			    ("externalbdptr    : addr - 0x%08x, val - 0x%08x",
1073			     (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
1074			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
1075			ugeth_info("ucode RX Prefetched BDs:");
1076			ugeth_info("Base address: 0x%08x",
1077				   (u32)
1078				   qe_muram_addr(in_be32
1079						 (&ugeth->p_rx_bd_qs_tbl[i].
1080						  bdbaseptr)));
1081			mem_disp((u8 *)
1082				 qe_muram_addr(in_be32
1083					       (&ugeth->p_rx_bd_qs_tbl[i].
1084						bdbaseptr)),
1085				 sizeof(struct ucc_geth_rx_prefetched_bds));
1086		}
1087	}
1088	if (ugeth->p_init_enet_param_shadow) {
1089		int size;
1090		ugeth_info("Init enet param shadow:");
1091		ugeth_info("Base address: 0x%08x",
1092			   (u32) ugeth->p_init_enet_param_shadow);
1093		mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
1094			 sizeof(*ugeth->p_init_enet_param_shadow));
1095
1096		size = sizeof(struct ucc_geth_thread_rx_pram);
1097		if (ugeth->ug_info->rxExtendedFiltering) {
1098			size +=
1099			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
1100			if (ugeth->ug_info->largestexternallookupkeysize ==
1101			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
1102				size +=
1103			THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
1104			if (ugeth->ug_info->largestexternallookupkeysize ==
1105			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
1106				size +=
1107			THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
1108		}
1109
1110		dump_init_enet_entries(ugeth,
1111				       &(ugeth->p_init_enet_param_shadow->
1112					 txthread[0]),
1113				       ENET_INIT_PARAM_MAX_ENTRIES_TX,
1114				       sizeof(struct ucc_geth_thread_tx_pram),
1115				       ugeth->ug_info->riscTx, 0);
1116		dump_init_enet_entries(ugeth,
1117				       &(ugeth->p_init_enet_param_shadow->
1118					 rxthread[0]),
1119				       ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
1120				       ugeth->ug_info->riscRx, 1);
1121	}
1122}
1123#endif /* DEBUG */
1124
1125static void init_default_reg_vals(volatile u32 *upsmr_register,
1126				  volatile u32 *maccfg1_register,
1127				  volatile u32 *maccfg2_register)
1128{
1129	out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
1130	out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
1131	out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
1132}
1133
1134static int init_half_duplex_params(int alt_beb,
1135				   int back_pressure_no_backoff,
1136				   int no_backoff,
1137				   int excess_defer,
1138				   u8 alt_beb_truncation,
1139				   u8 max_retransmissions,
1140				   u8 collision_window,
1141				   volatile u32 *hafdup_register)
1142{
1143	u32 value = 0;
1144
1145	if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
1146	    (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
1147	    (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
1148		return -EINVAL;
1149
1150	value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);
1151
1152	if (alt_beb)
1153		value |= HALFDUP_ALT_BEB;
1154	if (back_pressure_no_backoff)
1155		value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
1156	if (no_backoff)
1157		value |= HALFDUP_NO_BACKOFF;
1158	if (excess_defer)
1159		value |= HALFDUP_EXCESSIVE_DEFER;
1160
1161	value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
1162
1163	value |= collision_window;
1164
1165	out_be32(hafdup_register, value);
1166	return 0;
1167}
1168
1169static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
1170				       u8 non_btb_ipg,
1171				       u8 min_ifg,
1172				       u8 btb_ipg,
1173				       volatile u32 *ipgifg_register)
1174{
1175	u32 value = 0;
1176
1177	/* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
1178	IPG part 2 */
1179	if (non_btb_cs_ipg > non_btb_ipg)
1180		return -EINVAL;
1181
1182	if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
1183	    (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
1184	    /*(min_ifg        > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
1185	    (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
1186		return -EINVAL;
1187
1188	value |=
1189	    ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
1190	     IPGIFG_NBTB_CS_IPG_MASK);
1191	value |=
1192	    ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
1193	     IPGIFG_NBTB_IPG_MASK);
1194	value |=
1195	    ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
1196	     IPGIFG_MIN_IFG_MASK);
1197	value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);
1198
1199	out_be32(ipgifg_register, value);
1200	return 0;
1201}
1202
1203static int init_flow_control_params(u32 automatic_flow_control_mode,
1204				    int rx_flow_control_enable,
1205				    int tx_flow_control_enable,
1206				    u16 pause_period,
1207				    u16 extension_field,
1208				    volatile u32 *upsmr_register,
1209				    volatile u32 *uempr_register,
1210				    volatile u32 *maccfg1_register)
1211{
1212	u32 value = 0;
1213
1214	/* Set UEMPR register */
1215	value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
1216	value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
1217	out_be32(uempr_register, value);
1218
1219	/* Set UPSMR register */
1220	value = in_be32(upsmr_register);
1221	value |= automatic_flow_control_mode;
1222	out_be32(upsmr_register, value);
1223
1224	value = in_be32(maccfg1_register);
1225	if (rx_flow_control_enable)
1226		value |= MACCFG1_FLOW_RX;
1227	if (tx_flow_control_enable)
1228		value |= MACCFG1_FLOW_TX;
1229	out_be32(maccfg1_register, value);
1230
1231	return 0;
1232}
1233
1234static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
1235					     int auto_zero_hardware_statistics,
1236					     volatile u32 *upsmr_register,
1237					     volatile u16 *uescr_register)
1238{
1239	u32 upsmr_value = 0;
1240	u16 uescr_value = 0;
1241	/* Enable hardware statistics gathering if requested */
1242	if (enable_hardware_statistics) {
1243		upsmr_value = in_be32(upsmr_register);
1244		upsmr_value |= UPSMR_HSE;
1245		out_be32(upsmr_register, upsmr_value);
1246	}
1247
1248	/* Clear hardware statistics counters */
1249	uescr_value = in_be16(uescr_register);
1250	uescr_value |= UESCR_CLRCNT;
1251	/* Automatically zero hardware statistics counters on read,
1252	if requested */
1253	if (auto_zero_hardware_statistics)
1254		uescr_value |= UESCR_AUTOZ;
1255	out_be16(uescr_register, uescr_value);
1256
1257	return 0;
1258}
1259
1260static int init_firmware_statistics_gathering_mode(int
1261		enable_tx_firmware_statistics,
1262		int enable_rx_firmware_statistics,
1263		volatile u32 *tx_rmon_base_ptr,
1264		u32 tx_firmware_statistics_structure_address,
1265		volatile u32 *rx_rmon_base_ptr,
1266		u32 rx_firmware_statistics_structure_address,
1267		volatile u16 *temoder_register,
1268		volatile u32 *remoder_register)
1269{
1270	/* Note: this function does not check if */
1271	/* the parameters it receives are NULL   */
1272	u16 temoder_value;
1273	u32 remoder_value;
1274
1275	if (enable_tx_firmware_statistics) {
1276		out_be32(tx_rmon_base_ptr,
1277			 tx_firmware_statistics_structure_address);
1278		temoder_value = in_be16(temoder_register);
1279		temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE;
1280		out_be16(temoder_register, temoder_value);
1281	}
1282
1283	if (enable_rx_firmware_statistics) {
1284		out_be32(rx_rmon_base_ptr,
1285			 rx_firmware_statistics_structure_address);
1286		remoder_value = in_be32(remoder_register);
1287		remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE;
1288		out_be32(remoder_register, remoder_value);
1289	}
1290
1291	return 0;
1292}
1293
1294static int init_mac_station_addr_regs(u8 address_byte_0,
1295				      u8 address_byte_1,
1296				      u8 address_byte_2,
1297				      u8 address_byte_3,
1298				      u8 address_byte_4,
1299				      u8 address_byte_5,
1300				      volatile u32 *macstnaddr1_register,
1301				      volatile u32 *macstnaddr2_register)
1302{
1303	u32 value = 0;
1304
1305	/* Example: for a station address of 0x12345678ABCD, */
1306	/* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */
1307
1308	/* MACSTNADDR1 Register: */
1309
1310	/* 0                      7   8                      15  */
1311	/* station address byte 5     station address byte 4     */
1312	/* 16                     23  24                     31  */
1313	/* station address byte 3     station address byte 2     */
1314	value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
1315	value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
1316	value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
1317	value |= (u32) ((address_byte_5 << 24) & 0xFF000000);
1318
1319	out_be32(macstnaddr1_register, value);
1320
1321	/* MACSTNADDR2 Register: */
1322
1323	/* 0                      7   8                      15  */
1324	/* station address byte 1     station address byte 0     */
1325	/* 16                     23  24                     31  */
1326	/*         reserved                   reserved           */
1327	value = 0;
1328	value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
1329	value |= (u32) ((address_byte_1 << 24) & 0xFF000000);
1330
1331	out_be32(macstnaddr2_register, value);
1332
1333	return 0;
1334}
1335
1336static int init_check_frame_length_mode(int length_check,
1337					volatile u32 *maccfg2_register)
1338{
1339	u32 value = 0;
1340
1341	value = in_be32(maccfg2_register);
1342
1343	if (length_check)
1344		value |= MACCFG2_LC;
1345	else
1346		value &= ~MACCFG2_LC;
1347
1348	out_be32(maccfg2_register, value);
1349	return 0;
1350}
1351
1352static int init_preamble_length(u8 preamble_length,
1353				volatile u32 *maccfg2_register)
1354{
1355	u32 value = 0;
1356
1357	if ((preamble_length < 3) || (preamble_length > 7))
1358		return -EINVAL;
1359
1360	value = in_be32(maccfg2_register);
1361	value &= ~MACCFG2_PREL_MASK;
1362	value |= (preamble_length << MACCFG2_PREL_SHIFT);
1363	out_be32(maccfg2_register, value);
1364	return 0;
1365}
1366
1367static int init_rx_parameters(int reject_broadcast,
1368			      int receive_short_frames,
1369			      int promiscuous, volatile u32 *upsmr_register)
1370{
1371	u32 value = 0;
1372
1373	value = in_be32(upsmr_register);
1374
1375	if (reject_broadcast)
1376		value |= UPSMR_BRO;
1377	else
1378		value &= ~UPSMR_BRO;
1379
1380	if (receive_short_frames)
1381		value |= UPSMR_RSH;
1382	else
1383		value &= ~UPSMR_RSH;
1384
1385	if (promiscuous)
1386		value |= UPSMR_PRO;
1387	else
1388		value &= ~UPSMR_PRO;
1389
1390	out_be32(upsmr_register, value);
1391
1392	return 0;
1393}
1394
1395static int init_max_rx_buff_len(u16 max_rx_buf_len,
1396				volatile u16 *mrblr_register)
1397{
1398	/* max_rx_buf_len value must be a multiple of 128 */
1399	if ((max_rx_buf_len == 0)
1400	    || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
1401		return -EINVAL;
1402
1403	out_be16(mrblr_register, max_rx_buf_len);
1404	return 0;
1405}
1406
1407static int init_min_frame_len(u16 min_frame_length,
1408			      volatile u16 *minflr_register,
1409			      volatile u16 *mrblr_register)
1410{
1411	u16 mrblr_value = 0;
1412
1413	mrblr_value = in_be16(mrblr_register);
1414	if (min_frame_length >= (mrblr_value - 4))
1415		return -EINVAL;
1416
1417	out_be16(minflr_register, min_frame_length);
1418	return 0;
1419}
1420
1421static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1422{
1423	struct ucc_geth_info *ug_info;
1424	struct ucc_geth *ug_regs;
1425	struct ucc_fast *uf_regs;
1426	int ret_val;
1427	u32 upsmr, maccfg2, tbiBaseAddress;
1428	u16 value;
1429
1430	ugeth_vdbg("%s: IN", __FUNCTION__);
1431
1432	ug_info = ugeth->ug_info;
1433	ug_regs = ugeth->ug_regs;
1434	uf_regs = ugeth->uccf->uf_regs;
1435
1436	/*                    Set MACCFG2                    */
1437	maccfg2 = in_be32(&ug_regs->maccfg2);
1438	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
1439	if ((ugeth->max_speed == SPEED_10) ||
1440	    (ugeth->max_speed == SPEED_100))
1441		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
1442	else if (ugeth->max_speed == SPEED_1000)
1443		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
1444	maccfg2 |= ug_info->padAndCrc;
1445	out_be32(&ug_regs->maccfg2, maccfg2);
1446
1447	/*                    Set UPSMR                      */
1448	upsmr = in_be32(&uf_regs->upsmr);
1449	upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM);
1450	if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
1451	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
1452	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1453	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1454		upsmr |= UPSMR_RPM;
1455		switch (ugeth->max_speed) {
1456		case SPEED_10:
1457			upsmr |= UPSMR_R10M;
1458			/* FALLTHROUGH */
1459		case SPEED_100:
1460			if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
1461				upsmr |= UPSMR_RMM;
1462		}
1463	}
1464	if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
1465	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1466		upsmr |= UPSMR_TBIM;
1467	}
1468	out_be32(&uf_regs->upsmr, upsmr);
1469
1470	/* Disable autonegotiation in tbi mode, because by default it
1471	comes up in autonegotiation mode. */
1472	/* Note that this depends on proper setting in utbipar register. */
1473	if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
1474	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1475		tbiBaseAddress = in_be32(&ug_regs->utbipar);
1476		tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
1477		tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
1478		value = ugeth->phydev->bus->read(ugeth->phydev->bus,
1479				(u8) tbiBaseAddress, ENET_TBI_MII_CR);
1480		value &= ~0x1000;	/* Turn off autonegotiation */
1481		ugeth->phydev->bus->write(ugeth->phydev->bus,
1482				(u8) tbiBaseAddress, ENET_TBI_MII_CR, value);
1483	}
1484
1485	init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
1486
1487	ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
1488	if (ret_val != 0) {
1489		ugeth_err
1490		    ("%s: Preamble length must be between 3 and 7 inclusive.",
1491		     __FUNCTION__);
1492		return ret_val;
1493	}
1494
1495	return 0;
1496}
1497
1498/* Called every time the controller might need to be made
1499 * aware of new link state.  The PHY code conveys this
1500 * information through variables in the ugeth structure, and this
1501 * function converts those variables into the appropriate
1502 * register values, and can bring down the device if needed.
1503 */
1504
1505static void adjust_link(struct net_device *dev)
1506{
1507	struct ucc_geth_private *ugeth = netdev_priv(dev);
1508	struct ucc_geth *ug_regs;
1509	struct ucc_fast *uf_regs;
1510	struct phy_device *phydev = ugeth->phydev;
1511	unsigned long flags;
1512	int new_state = 0;
1513
1514	ug_regs = ugeth->ug_regs;
1515	uf_regs = ugeth->uccf->uf_regs;
1516
1517	spin_lock_irqsave(&ugeth->lock, flags);
1518
1519	if (phydev->link) {
1520		u32 tempval = in_be32(&ug_regs->maccfg2);
1521		u32 upsmr = in_be32(&uf_regs->upsmr);
1522		/* Now we make sure that we can be in full duplex mode.
1523		 * If not, we operate in half-duplex mode. */
1524		if (phydev->duplex != ugeth->oldduplex) {
1525			new_state = 1;
1526			if (!(phydev->duplex))
1527				tempval &= ~(MACCFG2_FDX);
1528			else
1529				tempval |= MACCFG2_FDX;
1530			ugeth->oldduplex = phydev->duplex;
1531		}
1532
1533		if (phydev->speed != ugeth->oldspeed) {
1534			new_state = 1;
1535			switch (phydev->speed) {
1536			case SPEED_1000:
1537				tempval = ((tempval &
1538					    ~(MACCFG2_INTERFACE_MODE_MASK)) |
1539					    MACCFG2_INTERFACE_MODE_BYTE);
1540				break;
1541			case SPEED_100:
1542			case SPEED_10:
1543				tempval = ((tempval &
1544					    ~(MACCFG2_INTERFACE_MODE_MASK)) |
1545					    MACCFG2_INTERFACE_MODE_NIBBLE);
1546				/* if reduced mode, re-set UPSMR.R10M */
1547				if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
1548				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
1549				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1550				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1551					if (phydev->speed == SPEED_10)
1552						upsmr |= UPSMR_R10M;
1553					else
1554						upsmr &= ~(UPSMR_R10M);
1555				}
1556				break;
1557			default:
1558				if (netif_msg_link(ugeth))
1559					ugeth_warn(
1560						"%s: Ack!  Speed (%d) is not 10/100/1000!",
1561						dev->name, phydev->speed);
1562				break;
1563			}
1564			ugeth->oldspeed = phydev->speed;
1565		}
1566
1567		out_be32(&ug_regs->maccfg2, tempval);
1568		out_be32(&uf_regs->upsmr, upsmr);
1569
1570		if (!ugeth->oldlink) {
1571			new_state = 1;
1572			ugeth->oldlink = 1;
1573			netif_schedule(dev);
1574		}
1575	} else if (ugeth->oldlink) {
1576			new_state = 1;
1577			ugeth->oldlink = 0;
1578			ugeth->oldspeed = 0;
1579			ugeth->oldduplex = -1;
1580	}
1581
1582	if (new_state && netif_msg_link(ugeth))
1583		phy_print_status(phydev);
1584
1585	spin_unlock_irqrestore(&ugeth->lock, flags);
1586}
1587
1588/* Configure the PHY for dev.
1589 * returns 0 if success.  -1 if failure
1590 */
1591static int init_phy(struct net_device *dev)
1592{
1593	struct ucc_geth_private *priv = netdev_priv(dev);
1594	struct phy_device *phydev;
1595	char phy_id[BUS_ID_SIZE];
1596
1597	priv->oldlink = 0;
1598	priv->oldspeed = 0;
1599	priv->oldduplex = -1;
1600
1601	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->ug_info->mdio_bus,
1602			priv->ug_info->phy_address);
1603
1604	phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface);
1605
1606	if (IS_ERR(phydev)) {
1607		printk("%s: Could not attach to PHY\n", dev->name);
1608		return PTR_ERR(phydev);
1609	}
1610
1611	phydev->supported &= (ADVERTISED_10baseT_Half |
1612				 ADVERTISED_10baseT_Full |
1613				 ADVERTISED_100baseT_Half |
1614				 ADVERTISED_100baseT_Full);
1615
1616	if (priv->max_speed == SPEED_1000)
1617		phydev->supported |= ADVERTISED_1000baseT_Full;
1618
1619	phydev->advertising = phydev->supported;
1620
1621	priv->phydev = phydev;
1622
1623	return 0;
1624}
1625
1626
1627
1628static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
1629{
1630	struct ucc_fast_private *uccf;
1631	u32 cecr_subblock;
1632	u32 temp;
1633
1634	uccf = ugeth->uccf;
1635
1636	/* Mask GRACEFUL STOP TX interrupt bit and clear it */
1637	temp = in_be32(uccf->p_uccm);
1638	temp &= ~UCCE_GRA;
1639	out_be32(uccf->p_uccm, temp);
1640	out_be32(uccf->p_ucce, UCCE_GRA);	/* clear by writing 1 */
1641
1642	/* Issue host command */
1643	cecr_subblock =
1644	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1645	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
1646		     QE_CR_PROTOCOL_ETHERNET, 0);
1647
1648	/* Wait for command to complete */
1649	do {
1650		temp = in_be32(uccf->p_ucce);
1651	} while (!(temp & UCCE_GRA));
1652
1653	uccf->stopped_tx = 1;
1654
1655	return 0;
1656}
1657
1658static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth)
1659{
1660	struct ucc_fast_private *uccf;
1661	u32 cecr_subblock;
1662	u8 temp;
1663
1664	uccf = ugeth->uccf;
1665
1666	/* Clear acknowledge bit */
1667	temp = ugeth->p_rx_glbl_pram->rxgstpack;
1668	temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
1669	ugeth->p_rx_glbl_pram->rxgstpack = temp;
1670
1671	/* Keep issuing command and checking acknowledge bit until
1672	it is asserted, according to spec */
1673	do {
1674		/* Issue host command */
1675		cecr_subblock =
1676		    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
1677						ucc_num);
1678		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
1679			     QE_CR_PROTOCOL_ETHERNET, 0);
1680
1681		temp = ugeth->p_rx_glbl_pram->rxgstpack;
1682	} while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));
1683
1684	uccf->stopped_rx = 1;
1685
1686	return 0;
1687}
1688
1689static int ugeth_restart_tx(struct ucc_geth_private *ugeth)
1690{
1691	struct ucc_fast_private *uccf;
1692	u32 cecr_subblock;
1693
1694	uccf = ugeth->uccf;
1695
1696	cecr_subblock =
1697	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1698	qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
1699	uccf->stopped_tx = 0;
1700
1701	return 0;
1702}
1703
1704static int ugeth_restart_rx(struct ucc_geth_private *ugeth)
1705{
1706	struct ucc_fast_private *uccf;
1707	u32 cecr_subblock;
1708
1709	uccf = ugeth->uccf;
1710
1711	cecr_subblock =
1712	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1713	qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
1714		     0);
1715	uccf->stopped_rx = 0;
1716
1717	return 0;
1718}
1719
1720static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
1721{
1722	struct ucc_fast_private *uccf;
1723	int enabled_tx, enabled_rx;
1724
1725	uccf = ugeth->uccf;
1726
1727	/* check if the UCC number is in range. */
1728	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1729		ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
1730		return -EINVAL;
1731	}
1732
1733	enabled_tx = uccf->enabled_tx;
1734	enabled_rx = uccf->enabled_rx;
1735
1736	/* Get Tx and Rx going again, in case this channel was actively
1737	disabled. */
1738	if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
1739		ugeth_restart_tx(ugeth);
1740	if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
1741		ugeth_restart_rx(ugeth);
1742
1743	ucc_fast_enable(uccf, mode);	/* OK to do even if not disabled */
1744
1745	return 0;
1746
1747}
1748
1749static int ugeth_disable(struct ucc_geth_private * ugeth, enum comm_dir mode)
1750{
1751	struct ucc_fast_private *uccf;
1752
1753	uccf = ugeth->uccf;
1754
1755	/* check if the UCC number is in range. */
1756	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1757		ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
1758		return -EINVAL;
1759	}
1760
1761	/* Stop any transmissions */
1762	if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
1763		ugeth_graceful_stop_tx(ugeth);
1764
1765	/* Stop any receptions */
1766	if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
1767		ugeth_graceful_stop_rx(ugeth);
1768
1769	ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
1770
1771	return 0;
1772}
1773
1774static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
1775{
1776#ifdef DEBUG
1777	ucc_fast_dump_regs(ugeth->uccf);
1778	dump_regs(ugeth);
1779	dump_bds(ugeth);
1780#endif
1781}
1782
1783#ifdef CONFIG_UGETH_FILTERING
1784static int ugeth_ext_filtering_serialize_tad(struct ucc_geth_tad_params *
1785					     p_UccGethTadParams,
1786					     struct qe_fltr_tad *qe_fltr_tad)
1787{
1788	u16 temp;
1789
1790	/* Zero serialized TAD */
1791	memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE);
1792
1793	qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V;	/* Must have this */
1794	if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode ||
1795	    (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
1796	    || (p_UccGethTadParams->vnontag_op !=
1797		UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP)
1798	    )
1799		qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF;
1800	if (p_UccGethTadParams->reject_frame)
1801		qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ;
1802	temp =
1803	    (u16) (((u16) p_UccGethTadParams->
1804		    vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT);
1805	qe_fltr_tad->serialized[0] |= (u8) (temp >> 8);	/* upper bits */
1806
1807	qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff);	/* lower bits */
1808	if (p_UccGethTadParams->vnontag_op ==
1809	    UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT)
1810		qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP;
1811	qe_fltr_tad->serialized[1] |=
1812	    p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT;
1813
1814	qe_fltr_tad->serialized[2] |=
1815	    p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT;
1816	/* upper bits */
1817	qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8);
1818	/* lower bits */
1819	qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff);
1820
1821	return 0;
1822}
1823
1824static struct enet_addr_container_t
1825    *ugeth_82xx_filtering_get_match_addr_in_hash(struct ucc_geth_private *ugeth,
1826						 struct enet_addr *p_enet_addr)
1827{
1828	struct enet_addr_container *enet_addr_cont;
1829	struct list_head *p_lh;
1830	u16 i, num;
1831	int32_t j;
1832	u8 *p_counter;
1833
1834	if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
1835		p_lh = &ugeth->group_hash_q;
1836		p_counter = &(ugeth->numGroupAddrInHash);
1837	} else {
1838		p_lh = &ugeth->ind_hash_q;
1839		p_counter = &(ugeth->numIndAddrInHash);
1840	}
1841
1842	if (!p_lh)
1843		return NULL;
1844
1845	num = *p_counter;
1846
1847	for (i = 0; i < num; i++) {
1848		enet_addr_cont =
1849		    (struct enet_addr_container *)
1850		    ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
1851		for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) {
1852			if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j])
1853				break;
1854			if (j == 0)
1855				return enet_addr_cont;	/* Found */
1856		}
1857		enqueue(p_lh, &enet_addr_cont->node);	/* Put it back */
1858	}
1859	return NULL;
1860}
1861
1862static int ugeth_82xx_filtering_add_addr_in_hash(struct ucc_geth_private *ugeth,
1863						 struct enet_addr *p_enet_addr)
1864{
1865	enum ucc_geth_enet_address_recognition_location location;
1866	struct enet_addr_container *enet_addr_cont;
1867	struct list_head *p_lh;
1868	u8 i;
1869	u32 limit;
1870	u8 *p_counter;
1871
1872	if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
1873		p_lh = &ugeth->group_hash_q;
1874		limit = ugeth->ug_info->maxGroupAddrInHash;
1875		location =
1876		    UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH;
1877		p_counter = &(ugeth->numGroupAddrInHash);
1878	} else {
1879		p_lh = &ugeth->ind_hash_q;
1880		limit = ugeth->ug_info->maxIndAddrInHash;
1881		location =
1882		    UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH;
1883		p_counter = &(ugeth->numIndAddrInHash);
1884	}
1885
1886	if ((enet_addr_cont =
1887	     ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) {
1888		list_add(p_lh, &enet_addr_cont->node);	/* Put it back */
1889		return 0;
1890	}
1891	if ((!p_lh) || (!(*p_counter < limit)))
1892		return -EBUSY;
1893	if (!(enet_addr_cont = get_enet_addr_container()))
1894		return -ENOMEM;
1895	for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
1896		(enet_addr_cont->address)[i] = (*p_enet_addr)[i];
1897	enet_addr_cont->location = location;
1898	enqueue(p_lh, &enet_addr_cont->node);	/* Put it back */
1899	++(*p_counter);
1900
1901	hw_add_addr_in_hash(ugeth, enet_addr_cont->address);
1902	return 0;
1903}
1904
1905static int ugeth_82xx_filtering_clear_addr_in_hash(struct ucc_geth_private *ugeth,
1906						   struct enet_addr *p_enet_addr)
1907{
1908	struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
1909	struct enet_addr_container *enet_addr_cont;
1910	struct ucc_fast_private *uccf;
1911	enum comm_dir comm_dir;
1912	u16 i, num;
1913	struct list_head *p_lh;
1914	u32 *addr_h, *addr_l;
1915	u8 *p_counter;
1916
1917	uccf = ugeth->uccf;
1918
1919	p_82xx_addr_filt =
1920	    (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
1921	    addressfiltering;
1922
1923	if (!
1924	    (enet_addr_cont =
1925	     ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr)))
1926		return -ENOENT;
1927
1928	/* It's been found and removed from the CQ. */
1929	/* Now destroy its container */
1930	put_enet_addr_container(enet_addr_cont);
1931
1932	if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
1933		addr_h = &(p_82xx_addr_filt->gaddr_h);
1934		addr_l = &(p_82xx_addr_filt->gaddr_l);
1935		p_lh = &ugeth->group_hash_q;
1936		p_counter = &(ugeth->numGroupAddrInHash);
1937	} else {
1938		addr_h = &(p_82xx_addr_filt->iaddr_h);
1939		addr_l = &(p_82xx_addr_filt->iaddr_l);
1940		p_lh = &ugeth->ind_hash_q;
1941		p_counter = &(ugeth->numIndAddrInHash);
1942	}
1943
1944	comm_dir = 0;
1945	if (uccf->enabled_tx)
1946		comm_dir |= COMM_DIR_TX;
1947	if (uccf->enabled_rx)
1948		comm_dir |= COMM_DIR_RX;
1949	if (comm_dir)
1950		ugeth_disable(ugeth, comm_dir);
1951
1952	/* Clear the hash table. */
1953	out_be32(addr_h, 0x00000000);
1954	out_be32(addr_l, 0x00000000);
1955
1956	/* Add all remaining CQ elements back into hash */
1957	num = --(*p_counter);
1958	for (i = 0; i < num; i++) {
1959		enet_addr_cont =
1960		    (struct enet_addr_container *)
1961		    ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
1962		hw_add_addr_in_hash(ugeth, enet_addr_cont->address);
1963		enqueue(p_lh, &enet_addr_cont->node);	/* Put it back */
1964	}
1965
1966	if (comm_dir)
1967		ugeth_enable(ugeth, comm_dir);
1968
1969	return 0;
1970}
1971#endif /* CONFIG_UGETH_FILTERING */
1972
1973static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
1974						       ugeth,
1975						       enum enet_addr_type
1976						       enet_addr_type)
1977{
1978	struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
1979	struct ucc_fast_private *uccf;
1980	enum comm_dir comm_dir;
1981	struct list_head *p_lh;
1982	u16 i, num;
1983	u32 *addr_h, *addr_l;
1984	u8 *p_counter;
1985
1986	uccf = ugeth->uccf;
1987
1988	p_82xx_addr_filt =
1989	    (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
1990	    addressfiltering;
1991
1992	if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
1993		addr_h = &(p_82xx_addr_filt->gaddr_h);
1994		addr_l = &(p_82xx_addr_filt->gaddr_l);
1995		p_lh = &ugeth->group_hash_q;
1996		p_counter = &(ugeth->numGroupAddrInHash);
1997	} else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
1998		addr_h = &(p_82xx_addr_filt->iaddr_h);
1999		addr_l = &(p_82xx_addr_filt->iaddr_l);
2000		p_lh = &ugeth->ind_hash_q;
2001		p_counter = &(ugeth->numIndAddrInHash);
2002	} else
2003		return -EINVAL;
2004
2005	comm_dir = 0;
2006	if (uccf->enabled_tx)
2007		comm_dir |= COMM_DIR_TX;
2008	if (uccf->enabled_rx)
2009		comm_dir |= COMM_DIR_RX;
2010	if (comm_dir)
2011		ugeth_disable(ugeth, comm_dir);
2012
2013	/* Clear the hash table. */
2014	out_be32(addr_h, 0x00000000);
2015	out_be32(addr_l, 0x00000000);
2016
2017	if (!p_lh)
2018		return 0;
2019
2020	num = *p_counter;
2021
2022	/* Delete all remaining CQ elements */
2023	for (i = 0; i < num; i++)
2024		put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
2025
2026	*p_counter = 0;
2027
2028	if (comm_dir)
2029		ugeth_enable(ugeth, comm_dir);
2030
2031	return 0;
2032}
2033
2034#ifdef CONFIG_UGETH_FILTERING
2035static int ugeth_82xx_filtering_add_addr_in_paddr(struct ucc_geth_private *ugeth,
2036						  struct enet_addr *p_enet_addr,
2037						  u8 paddr_num)
2038{
2039	int i;
2040
2041	if ((*p_enet_addr)[0] & ENET_GROUP_ADDR)
2042		ugeth_warn
2043		    ("%s: multicast address added to paddr will have no "
2044		     "effect - is this what you wanted?",
2045		     __FUNCTION__);
2046
2047	ugeth->indAddrRegUsed[paddr_num] = 1;	/* mark this paddr as used */
2048	/* store address in our database */
2049	for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
2050		ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i];
2051	/* put in hardware */
2052	return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num);
2053}
2054#endif /* CONFIG_UGETH_FILTERING */
2055
2056static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth,
2057						    u8 paddr_num)
2058{
2059	ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
2060	return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
2061}
2062
2063static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
2064{
2065	u16 i, j;
2066	u8 *bd;
2067
2068	if (!ugeth)
2069		return;
2070
2071	if (ugeth->uccf)
2072		ucc_fast_free(ugeth->uccf);
2073
2074	if (ugeth->p_thread_data_tx) {
2075		qe_muram_free(ugeth->thread_dat_tx_offset);
2076		ugeth->p_thread_data_tx = NULL;
2077	}
2078	if (ugeth->p_thread_data_rx) {
2079		qe_muram_free(ugeth->thread_dat_rx_offset);
2080		ugeth->p_thread_data_rx = NULL;
2081	}
2082	if (ugeth->p_exf_glbl_param) {
2083		qe_muram_free(ugeth->exf_glbl_param_offset);
2084		ugeth->p_exf_glbl_param = NULL;
2085	}
2086	if (ugeth->p_rx_glbl_pram) {
2087		qe_muram_free(ugeth->rx_glbl_pram_offset);
2088		ugeth->p_rx_glbl_pram = NULL;
2089	}
2090	if (ugeth->p_tx_glbl_pram) {
2091		qe_muram_free(ugeth->tx_glbl_pram_offset);
2092		ugeth->p_tx_glbl_pram = NULL;
2093	}
2094	if (ugeth->p_send_q_mem_reg) {
2095		qe_muram_free(ugeth->send_q_mem_reg_offset);
2096		ugeth->p_send_q_mem_reg = NULL;
2097	}
2098	if (ugeth->p_scheduler) {
2099		qe_muram_free(ugeth->scheduler_offset);
2100		ugeth->p_scheduler = NULL;
2101	}
2102	if (ugeth->p_tx_fw_statistics_pram) {
2103		qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
2104		ugeth->p_tx_fw_statistics_pram = NULL;
2105	}
2106	if (ugeth->p_rx_fw_statistics_pram) {
2107		qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
2108		ugeth->p_rx_fw_statistics_pram = NULL;
2109	}
2110	if (ugeth->p_rx_irq_coalescing_tbl) {
2111		qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
2112		ugeth->p_rx_irq_coalescing_tbl = NULL;
2113	}
2114	if (ugeth->p_rx_bd_qs_tbl) {
2115		qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
2116		ugeth->p_rx_bd_qs_tbl = NULL;
2117	}
2118	if (ugeth->p_init_enet_param_shadow) {
2119		return_init_enet_entries(ugeth,
2120					 &(ugeth->p_init_enet_param_shadow->
2121					   rxthread[0]),
2122					 ENET_INIT_PARAM_MAX_ENTRIES_RX,
2123					 ugeth->ug_info->riscRx, 1);
2124		return_init_enet_entries(ugeth,
2125					 &(ugeth->p_init_enet_param_shadow->
2126					   txthread[0]),
2127					 ENET_INIT_PARAM_MAX_ENTRIES_TX,
2128					 ugeth->ug_info->riscTx, 0);
2129		kfree(ugeth->p_init_enet_param_shadow);
2130		ugeth->p_init_enet_param_shadow = NULL;
2131	}
2132	for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
2133		bd = ugeth->p_tx_bd_ring[i];
2134		if (!bd)
2135			continue;
2136		for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
2137			if (ugeth->tx_skbuff[i][j]) {
2138				dma_unmap_single(NULL,
2139						 ((qe_bd_t *)bd)->buf,
2140						 (in_be32((u32 *)bd) &
2141						  BD_LENGTH_MASK),
2142						 DMA_TO_DEVICE);
2143				dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
2144				ugeth->tx_skbuff[i][j] = NULL;
2145			}
2146		}
2147
2148		kfree(ugeth->tx_skbuff[i]);
2149
2150		if (ugeth->p_tx_bd_ring[i]) {
2151			if (ugeth->ug_info->uf_info.bd_mem_part ==
2152			    MEM_PART_SYSTEM)
2153				kfree((void *)ugeth->tx_bd_ring_offset[i]);
2154			else if (ugeth->ug_info->uf_info.bd_mem_part ==
2155				 MEM_PART_MURAM)
2156				qe_muram_free(ugeth->tx_bd_ring_offset[i]);
2157			ugeth->p_tx_bd_ring[i] = NULL;
2158		}
2159	}
2160	for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
2161		if (ugeth->p_rx_bd_ring[i]) {
2162			/* Return existing data buffers in ring */
2163			bd = ugeth->p_rx_bd_ring[i];
2164			for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
2165				if (ugeth->rx_skbuff[i][j]) {
2166					dma_unmap_single(NULL,
2167						((struct qe_bd *)bd)->buf,
2168						ugeth->ug_info->
2169						uf_info.max_rx_buf_length +
2170						UCC_GETH_RX_DATA_BUF_ALIGNMENT,
2171						DMA_FROM_DEVICE);
2172					dev_kfree_skb_any(
2173						ugeth->rx_skbuff[i][j]);
2174					ugeth->rx_skbuff[i][j] = NULL;
2175				}
2176				bd += sizeof(struct qe_bd);
2177			}
2178
2179			kfree(ugeth->rx_skbuff[i]);
2180
2181			if (ugeth->ug_info->uf_info.bd_mem_part ==
2182			    MEM_PART_SYSTEM)
2183				kfree((void *)ugeth->rx_bd_ring_offset[i]);
2184			else if (ugeth->ug_info->uf_info.bd_mem_part ==
2185				 MEM_PART_MURAM)
2186				qe_muram_free(ugeth->rx_bd_ring_offset[i]);
2187			ugeth->p_rx_bd_ring[i] = NULL;
2188		}
2189	}
2190	while (!list_empty(&ugeth->group_hash_q))
2191		put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2192					(dequeue(&ugeth->group_hash_q)));
2193	while (!list_empty(&ugeth->ind_hash_q))
2194		put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2195					(dequeue(&ugeth->ind_hash_q)));
2196
2197}
2198
2199static void ucc_geth_set_multi(struct net_device *dev)
2200{
2201	struct ucc_geth_private *ugeth;
2202	struct dev_mc_list *dmi;
2203	struct ucc_fast *uf_regs;
2204	struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
2205	u8 tempaddr[6];
2206	u8 *mcptr, *tdptr;
2207	int i, j;
2208
2209	ugeth = netdev_priv(dev);
2210
2211	uf_regs = ugeth->uccf->uf_regs;
2212
2213	if (dev->flags & IFF_PROMISC) {
2214
2215		uf_regs->upsmr |= UPSMR_PRO;
2216
2217	} else {
2218
2219		uf_regs->upsmr &= ~UPSMR_PRO;
2220
2221		p_82xx_addr_filt =
2222		    (struct ucc_geth_82xx_address_filtering_pram *) ugeth->
2223		    p_rx_glbl_pram->addressfiltering;
2224
2225		if (dev->flags & IFF_ALLMULTI) {
2226			/* Catch all multicast addresses, so set the
2227			 * filter to all 1's.
2228			 */
2229			out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
2230			out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
2231		} else {
2232			/* Clear filter and add the addresses in the list.
2233			 */
2234			out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
2235			out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
2236
2237			dmi = dev->mc_list;
2238
2239			for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
2240
2241				/* Only support group multicast for now.
2242				 */
2243				if (!(dmi->dmi_addr[0] & 1))
2244					continue;
2245
2246				/* The address in dmi_addr is LSB first,
2247				 * and taddr is MSB first.  We have to
2248				 * copy bytes MSB first from dmi_addr.
2249				 */
2250				mcptr = (u8 *) dmi->dmi_addr + 5;
2251				tdptr = (u8 *) tempaddr;
2252				for (j = 0; j < 6; j++)
2253					*tdptr++ = *mcptr--;
2254
2255				/* Ask CPM to run CRC and set bit in
2256				 * filter mask.
2257				 */
2258				hw_add_addr_in_hash(ugeth, tempaddr);
2259			}
2260		}
2261	}
2262}
2263
2264static void ucc_geth_stop(struct ucc_geth_private *ugeth)
2265{
2266	struct ucc_geth *ug_regs = ugeth->ug_regs;
2267	struct phy_device *phydev = ugeth->phydev;
2268	u32 tempval;
2269
2270	ugeth_vdbg("%s: IN", __FUNCTION__);
2271
2272	/* Disable the controller */
2273	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
2274
2275	/* Tell the kernel the link is down */
2276	phy_stop(phydev);
2277
2278	/* Mask all interrupts */
2279	out_be32(ugeth->uccf->p_ucce, 0x00000000);
2280
2281	/* Clear all interrupts */
2282	out_be32(ugeth->uccf->p_ucce, 0xffffffff);
2283
2284	/* Disable Rx and Tx */
2285	tempval = in_be32(&ug_regs->maccfg1);
2286	tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
2287	out_be32(&ug_regs->maccfg1, tempval);
2288
2289	free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
2290
2291	ucc_geth_memclean(ugeth);
2292}
2293
2294static int ucc_struct_init(struct ucc_geth_private *ugeth)
2295{
2296	struct ucc_geth_info *ug_info;
2297	struct ucc_fast_info *uf_info;
2298	int i;
2299
2300	ug_info = ugeth->ug_info;
2301	uf_info = &ug_info->uf_info;
2302
2303	/* Create CQs for hash tables */
2304	INIT_LIST_HEAD(&ugeth->group_hash_q);
2305	INIT_LIST_HEAD(&ugeth->ind_hash_q);
2306
2307	if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
2308	      (uf_info->bd_mem_part == MEM_PART_MURAM))) {
2309		ugeth_err("%s: Bad memory partition value.", __FUNCTION__);
2310		return -EINVAL;
2311	}
2312
2313	/* Rx BD lengths */
2314	for (i = 0; i < ug_info->numQueuesRx; i++) {
2315		if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
2316		    (ug_info->bdRingLenRx[i] %
2317		     UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
2318			ugeth_err
2319			    ("%s: Rx BD ring length must be multiple of 4,"
2320				" no smaller than 8.", __FUNCTION__);
2321			return -EINVAL;
2322		}
2323	}
2324
2325	/* Tx BD lengths */
2326	for (i = 0; i < ug_info->numQueuesTx; i++) {
2327		if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
2328			ugeth_err
2329			    ("%s: Tx BD ring length must be no smaller than 2.",
2330			     __FUNCTION__);
2331			return -EINVAL;
2332		}
2333	}
2334
2335	/* mrblr */
2336	if ((uf_info->max_rx_buf_length == 0) ||
2337	    (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
2338		ugeth_err
2339		    ("%s: max_rx_buf_length must be non-zero multiple of 128.",
2340		     __FUNCTION__);
2341		return -EINVAL;
2342	}
2343
2344	/* num Tx queues */
2345	if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
2346		ugeth_err("%s: number of tx queues too large.", __FUNCTION__);
2347		return -EINVAL;
2348	}
2349
2350	/* num Rx queues */
2351	if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
2352		ugeth_err("%s: number of rx queues too large.", __FUNCTION__);
2353		return -EINVAL;
2354	}
2355
2356	/* l2qt */
2357	for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
2358		if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
2359			ugeth_err
2360			    ("%s: VLAN priority table entry must not be"
2361				" larger than number of Rx queues.",
2362			     __FUNCTION__);
2363			return -EINVAL;
2364		}
2365	}
2366
2367	/* l3qt */
2368	for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
2369		if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
2370			ugeth_err
2371			    ("%s: IP priority table entry must not be"
2372				" larger than number of Rx queues.",
2373			     __FUNCTION__);
2374			return -EINVAL;
2375		}
2376	}
2377
2378	if (ug_info->cam && !ug_info->ecamptr) {
2379		ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
2380			  __FUNCTION__);
2381		return -EINVAL;
2382	}
2383
2384	if ((ug_info->numStationAddresses !=
2385	     UCC_GETH_NUM_OF_STATION_ADDRESSES_1)
2386	    && ug_info->rxExtendedFiltering) {
2387		ugeth_err("%s: Number of station addresses greater than 1 "
2388			  "not allowed in extended parsing mode.",
2389			  __FUNCTION__);
2390		return -EINVAL;
2391	}
2392
2393	/* Generate uccm_mask for receive */
2394	uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
2395	for (i = 0; i < ug_info->numQueuesRx; i++)
2396		uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i);
2397
2398	for (i = 0; i < ug_info->numQueuesTx; i++)
2399		uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i);
2400	/* Initialize the general fast UCC block. */
2401	if (ucc_fast_init(uf_info, &ugeth->uccf)) {
2402		ugeth_err("%s: Failed to init uccf.", __FUNCTION__);
2403		ucc_geth_memclean(ugeth);
2404		return -ENOMEM;
2405	}
2406
2407	ugeth->ug_regs = (struct ucc_geth *) ioremap(uf_info->regs, sizeof(struct ucc_geth));
2408
2409	return 0;
2410}
2411
2412static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2413{
2414	struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
2415	struct ucc_geth_init_pram *p_init_enet_pram;
2416	struct ucc_fast_private *uccf;
2417	struct ucc_geth_info *ug_info;
2418	struct ucc_fast_info *uf_info;
2419	struct ucc_fast *uf_regs;
2420	struct ucc_geth *ug_regs;
2421	int ret_val = -EINVAL;
2422	u32 remoder = UCC_GETH_REMODER_INIT;
2423	u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
2424	u32 ifstat, i, j, size, l2qt, l3qt, length;
2425	u16 temoder = UCC_GETH_TEMODER_INIT;
2426	u16 test;
2427	u8 function_code = 0;
2428	u8 *bd, *endOfRing;
2429	u8 numThreadsRxNumerical, numThreadsTxNumerical;
2430
2431	ugeth_vdbg("%s: IN", __FUNCTION__);
2432	uccf = ugeth->uccf;
2433	ug_info = ugeth->ug_info;
2434	uf_info = &ug_info->uf_info;
2435	uf_regs = uccf->uf_regs;
2436	ug_regs = ugeth->ug_regs;
2437
2438	switch (ug_info->numThreadsRx) {
2439	case UCC_GETH_NUM_OF_THREADS_1:
2440		numThreadsRxNumerical = 1;
2441		break;
2442	case UCC_GETH_NUM_OF_THREADS_2:
2443		numThreadsRxNumerical = 2;
2444		break;
2445	case UCC_GETH_NUM_OF_THREADS_4:
2446		numThreadsRxNumerical = 4;
2447		break;
2448	case UCC_GETH_NUM_OF_THREADS_6:
2449		numThreadsRxNumerical = 6;
2450		break;
2451	case UCC_GETH_NUM_OF_THREADS_8:
2452		numThreadsRxNumerical = 8;
2453		break;
2454	default:
2455		ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__);
2456		ucc_geth_memclean(ugeth);
2457		return -EINVAL;
2458		break;
2459	}
2460
2461	switch (ug_info->numThreadsTx) {
2462	case UCC_GETH_NUM_OF_THREADS_1:
2463		numThreadsTxNumerical = 1;
2464		break;
2465	case UCC_GETH_NUM_OF_THREADS_2:
2466		numThreadsTxNumerical = 2;
2467		break;
2468	case UCC_GETH_NUM_OF_THREADS_4:
2469		numThreadsTxNumerical = 4;
2470		break;
2471	case UCC_GETH_NUM_OF_THREADS_6:
2472		numThreadsTxNumerical = 6;
2473		break;
2474	case UCC_GETH_NUM_OF_THREADS_8:
2475		numThreadsTxNumerical = 8;
2476		break;
2477	default:
2478		ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__);
2479		ucc_geth_memclean(ugeth);
2480		return -EINVAL;
2481		break;
2482	}
2483
2484	/* Calculate rx_extended_features */
2485	ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
2486	    ug_info->ipAddressAlignment ||
2487	    (ug_info->numStationAddresses !=
2488	     UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
2489
2490	ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
2491	    (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
2492	    || (ug_info->vlanOperationNonTagged !=
2493		UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
2494
2495	init_default_reg_vals(&uf_regs->upsmr,
2496			      &ug_regs->maccfg1, &ug_regs->maccfg2);
2497
2498	/*                    Set UPSMR                      */
2499	/* For more details see the hardware spec.           */
2500	init_rx_parameters(ug_info->bro,
2501			   ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
2502
2503	/* We're going to ignore other registers for now, */
2504	/* except as needed to get up and running         */
2505
2506	/*                    Set MACCFG1                    */
2507	/* For more details see the hardware spec.           */
2508	init_flow_control_params(ug_info->aufc,
2509				 ug_info->receiveFlowControl,
2510				 1,
2511				 ug_info->pausePeriod,
2512				 ug_info->extensionField,
2513				 &uf_regs->upsmr,
2514				 &ug_regs->uempr, &ug_regs->maccfg1);
2515
2516	maccfg1 = in_be32(&ug_regs->maccfg1);
2517	maccfg1 |= MACCFG1_ENABLE_RX;
2518	maccfg1 |= MACCFG1_ENABLE_TX;
2519	out_be32(&ug_regs->maccfg1, maccfg1);
2520
2521	/*                    Set IPGIFG                     */
2522	/* For more details see the hardware spec.           */
2523	ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
2524					      ug_info->nonBackToBackIfgPart2,
2525					      ug_info->
2526					      miminumInterFrameGapEnforcement,
2527					      ug_info->backToBackInterFrameGap,
2528					      &ug_regs->ipgifg);
2529	if (ret_val != 0) {
2530		ugeth_err("%s: IPGIFG initialization parameter too large.",
2531			  __FUNCTION__);
2532		ucc_geth_memclean(ugeth);
2533		return ret_val;
2534	}
2535
2536	/*                    Set HAFDUP                     */
2537	/* For more details see the hardware spec.           */
2538	ret_val = init_half_duplex_params(ug_info->altBeb,
2539					  ug_info->backPressureNoBackoff,
2540					  ug_info->noBackoff,
2541					  ug_info->excessDefer,
2542					  ug_info->altBebTruncation,
2543					  ug_info->maxRetransmission,
2544					  ug_info->collisionWindow,
2545					  &ug_regs->hafdup);
2546	if (ret_val != 0) {
2547		ugeth_err("%s: Half Duplex initialization parameter too large.",
2548			  __FUNCTION__);
2549		ucc_geth_memclean(ugeth);
2550		return ret_val;
2551	}
2552
2553	/*                    Set IFSTAT                     */
2554	/* For more details see the hardware spec.           */
2555	/* Read only - resets upon read                      */
2556	ifstat = in_be32(&ug_regs->ifstat);
2557
2558	/*                    Clear UEMPR                    */
2559	/* For more details see the hardware spec.           */
2560	out_be32(&ug_regs->uempr, 0);
2561
2562	/*                    Set UESCR                      */
2563	/* For more details see the hardware spec.           */
2564	init_hw_statistics_gathering_mode((ug_info->statisticsMode &
2565				UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
2566				0, &uf_regs->upsmr, &ug_regs->uescr);
2567
2568	/* Allocate Tx bds */
2569	for (j = 0; j < ug_info->numQueuesTx; j++) {
2570		/* Allocate in multiple of
2571		   UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
2572		   according to spec */
2573		length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
2574			  / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2575		    * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2576		if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
2577		    UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2578			length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2579		if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2580			u32 align = 4;
2581			if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
2582				align = UCC_GETH_TX_BD_RING_ALIGNMENT;
2583			ugeth->tx_bd_ring_offset[j] =
2584				kmalloc((u32) (length + align), GFP_KERNEL);
2585
2586			if (ugeth->tx_bd_ring_offset[j] != 0)
2587				ugeth->p_tx_bd_ring[j] =
2588					(void*)((ugeth->tx_bd_ring_offset[j] +
2589					align) & ~(align - 1));
2590		} else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2591			ugeth->tx_bd_ring_offset[j] =
2592			    qe_muram_alloc(length,
2593					   UCC_GETH_TX_BD_RING_ALIGNMENT);
2594			if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
2595				ugeth->p_tx_bd_ring[j] =
2596				    (u8 *) qe_muram_addr(ugeth->
2597							 tx_bd_ring_offset[j]);
2598		}
2599		if (!ugeth->p_tx_bd_ring[j]) {
2600			ugeth_err
2601			    ("%s: Can not allocate memory for Tx bd rings.",
2602			     __FUNCTION__);
2603			ucc_geth_memclean(ugeth);
2604			return -ENOMEM;
2605		}
2606		/* Zero unused end of bd ring, according to spec */
2607		memset(ugeth->p_tx_bd_ring[j] +
2608		       ug_info->bdRingLenTx[j] * sizeof(struct qe_bd), 0,
2609		       length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
2610	}
2611
2612	/* Allocate Rx bds */
2613	for (j = 0; j < ug_info->numQueuesRx; j++) {
2614		length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
2615		if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2616			u32 align = 4;
2617			if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
2618				align = UCC_GETH_RX_BD_RING_ALIGNMENT;
2619			ugeth->rx_bd_ring_offset[j] =
2620				kmalloc((u32) (length + align), GFP_KERNEL);
2621			if (ugeth->rx_bd_ring_offset[j] != 0)
2622				ugeth->p_rx_bd_ring[j] =
2623					(void*)((ugeth->rx_bd_ring_offset[j] +
2624					align) & ~(align - 1));
2625		} else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2626			ugeth->rx_bd_ring_offset[j] =
2627			    qe_muram_alloc(length,
2628					   UCC_GETH_RX_BD_RING_ALIGNMENT);
2629			if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
2630				ugeth->p_rx_bd_ring[j] =
2631				    (u8 *) qe_muram_addr(ugeth->
2632							 rx_bd_ring_offset[j]);
2633		}
2634		if (!ugeth->p_rx_bd_ring[j]) {
2635			ugeth_err
2636			    ("%s: Can not allocate memory for Rx bd rings.",
2637			     __FUNCTION__);
2638			ucc_geth_memclean(ugeth);
2639			return -ENOMEM;
2640		}
2641	}
2642
2643	/* Init Tx bds */
2644	for (j = 0; j < ug_info->numQueuesTx; j++) {
2645		/* Setup the skbuff rings */
2646		ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
2647					      ugeth->ug_info->bdRingLenTx[j],
2648					      GFP_KERNEL);
2649
2650		if (ugeth->tx_skbuff[j] == NULL) {
2651			ugeth_err("%s: Could not allocate tx_skbuff",
2652				  __FUNCTION__);
2653			ucc_geth_memclean(ugeth);
2654			return -ENOMEM;
2655		}
2656
2657		for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
2658			ugeth->tx_skbuff[j][i] = NULL;
2659
2660		ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
2661		bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
2662		for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
2663			/* clear bd buffer */
2664			out_be32(&((struct qe_bd *)bd)->buf, 0);
2665			/* set bd status and length */
2666			out_be32((u32 *)bd, 0);
2667			bd += sizeof(struct qe_bd);
2668		}
2669		bd -= sizeof(struct qe_bd);
2670		/* set bd status and length */
2671		out_be32((u32 *)bd, T_W);	/* for last BD set Wrap bit */
2672	}
2673
2674	/* Init Rx bds */
2675	for (j = 0; j < ug_info->numQueuesRx; j++) {
2676		/* Setup the skbuff rings */
2677		ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
2678					      ugeth->ug_info->bdRingLenRx[j],
2679					      GFP_KERNEL);
2680
2681		if (ugeth->rx_skbuff[j] == NULL) {
2682			ugeth_err("%s: Could not allocate rx_skbuff",
2683				  __FUNCTION__);
2684			ucc_geth_memclean(ugeth);
2685			return -ENOMEM;
2686		}
2687
2688		for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
2689			ugeth->rx_skbuff[j][i] = NULL;
2690
2691		ugeth->skb_currx[j] = 0;
2692		bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
2693		for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
2694			/* set bd status and length */
2695			out_be32((u32 *)bd, R_I);
2696			/* clear bd buffer */
2697			out_be32(&((struct qe_bd *)bd)->buf, 0);
2698			bd += sizeof(struct qe_bd);
2699		}
2700		bd -= sizeof(struct qe_bd);
2701		/* set bd status and length */
2702		out_be32((u32 *)bd, R_W); /* for last BD set Wrap bit */
2703	}
2704
2705	/*
2706	 * Global PRAM
2707	 */
2708	/* Tx global PRAM */
2709	/* Allocate global tx parameter RAM page */
2710	ugeth->tx_glbl_pram_offset =
2711	    qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
2712			   UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
2713	if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
2714		ugeth_err
2715		    ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
2716		     __FUNCTION__);
2717		ucc_geth_memclean(ugeth);
2718		return -ENOMEM;
2719	}
2720	ugeth->p_tx_glbl_pram =
2721	    (struct ucc_geth_tx_global_pram *) qe_muram_addr(ugeth->
2722							tx_glbl_pram_offset);
2723	/* Zero out p_tx_glbl_pram */
2724	memset(ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
2725
2726	/* Fill global PRAM */
2727
2728	/* TQPTR */
2729	/* Size varies with number of Tx threads */
2730	ugeth->thread_dat_tx_offset =
2731	    qe_muram_alloc(numThreadsTxNumerical *
2732			   sizeof(struct ucc_geth_thread_data_tx) +
2733			   32 * (numThreadsTxNumerical == 1),
2734			   UCC_GETH_THREAD_DATA_ALIGNMENT);
2735	if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) {
2736		ugeth_err
2737		    ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
2738		     __FUNCTION__);
2739		ucc_geth_memclean(ugeth);
2740		return -ENOMEM;
2741	}
2742
2743	ugeth->p_thread_data_tx =
2744	    (struct ucc_geth_thread_data_tx *) qe_muram_addr(ugeth->
2745							thread_dat_tx_offset);
2746	out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
2747
2748	/* vtagtable */
2749	for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
2750		out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
2751			 ug_info->vtagtable[i]);
2752
2753	/* iphoffset */
2754	for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
2755		ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i];
2756
2757	/* SQPTR */
2758	/* Size varies with number of Tx queues */
2759	ugeth->send_q_mem_reg_offset =
2760	    qe_muram_alloc(ug_info->numQueuesTx *
2761			   sizeof(struct ucc_geth_send_queue_qd),
2762			   UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
2763	if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
2764		ugeth_err
2765		    ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
2766		     __FUNCTION__);
2767		ucc_geth_memclean(ugeth);
2768		return -ENOMEM;
2769	}
2770
2771	ugeth->p_send_q_mem_reg =
2772	    (struct ucc_geth_send_queue_mem_region *) qe_muram_addr(ugeth->
2773			send_q_mem_reg_offset);
2774	out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
2775
2776	/* Setup the table */
2777	/* Assume BD rings are already established */
2778	for (i = 0; i < ug_info->numQueuesTx; i++) {
2779		endOfRing =
2780		    ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
2781					      1) * sizeof(struct qe_bd);
2782		if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
2783			out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
2784				 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
2785			out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
2786				 last_bd_completed_address,
2787				 (u32) virt_to_phys(endOfRing));
2788		} else if (ugeth->ug_info->uf_info.bd_mem_part ==
2789			   MEM_PART_MURAM) {
2790			out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
2791				 (u32) immrbar_virt_to_phys(ugeth->
2792							    p_tx_bd_ring[i]));
2793			out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
2794				 last_bd_completed_address,
2795				 (u32) immrbar_virt_to_phys(endOfRing));
2796		}
2797	}
2798
2799	/* schedulerbasepointer */
2800
2801	if (ug_info->numQueuesTx > 1) {
2802	/* scheduler exists only if more than 1 tx queue */
2803		ugeth->scheduler_offset =
2804		    qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
2805				   UCC_GETH_SCHEDULER_ALIGNMENT);
2806		if (IS_ERR_VALUE(ugeth->scheduler_offset)) {
2807			ugeth_err
2808			 ("%s: Can not allocate DPRAM memory for p_scheduler.",
2809			     __FUNCTION__);
2810			ucc_geth_memclean(ugeth);
2811			return -ENOMEM;
2812		}
2813
2814		ugeth->p_scheduler =
2815		    (struct ucc_geth_scheduler *) qe_muram_addr(ugeth->
2816							   scheduler_offset);
2817		out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
2818			 ugeth->scheduler_offset);
2819		/* Zero out p_scheduler */
2820		memset(ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
2821
2822		/* Set values in scheduler */
2823		out_be32(&ugeth->p_scheduler->mblinterval,
2824			 ug_info->mblinterval);
2825		out_be16(&ugeth->p_scheduler->nortsrbytetime,
2826			 ug_info->nortsrbytetime);
2827		ugeth->p_scheduler->fracsiz = ug_info->fracsiz;
2828		ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq;
2829		ugeth->p_scheduler->txasap = ug_info->txasap;
2830		ugeth->p_scheduler->extrabw = ug_info->extrabw;
2831		for (i = 0; i < NUM_TX_QUEUES; i++)
2832			ugeth->p_scheduler->weightfactor[i] =
2833			    ug_info->weightfactor[i];
2834
2835		/* Set pointers to cpucount registers in scheduler */
2836		ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
2837		ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
2838		ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
2839		ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
2840		ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
2841		ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
2842		ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
2843		ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
2844	}
2845
2846	/* schedulerbasepointer */
2847	/* TxRMON_PTR (statistics) */
2848	if (ug_info->
2849	    statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
2850		ugeth->tx_fw_statistics_pram_offset =
2851		    qe_muram_alloc(sizeof
2852				   (struct ucc_geth_tx_firmware_statistics_pram),
2853				   UCC_GETH_TX_STATISTICS_ALIGNMENT);
2854		if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) {
2855			ugeth_err
2856			    ("%s: Can not allocate DPRAM memory for"
2857				" p_tx_fw_statistics_pram.", __FUNCTION__);
2858			ucc_geth_memclean(ugeth);
2859			return -ENOMEM;
2860		}
2861		ugeth->p_tx_fw_statistics_pram =
2862		    (struct ucc_geth_tx_firmware_statistics_pram *)
2863		    qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
2864		/* Zero out p_tx_fw_statistics_pram */
2865		memset(ugeth->p_tx_fw_statistics_pram,
2866		       0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
2867	}
2868
2869	/* temoder */
2870	/* Already has speed set */
2871
2872	if (ug_info->numQueuesTx > 1)
2873		temoder |= TEMODER_SCHEDULER_ENABLE;
2874	if (ug_info->ipCheckSumGenerate)
2875		temoder |= TEMODER_IP_CHECKSUM_GENERATE;
2876	temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
2877	out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
2878
2879	test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
2880
2881	/* Function code register value to be used later */
2882	function_code = QE_BMR_BYTE_ORDER_BO_MOT | UCC_FAST_FUNCTION_CODE_GBL;
2883	/* Required for QE */
2884
2885	/* function code register */
2886	out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
2887
2888	/* Rx global PRAM */
2889	/* Allocate global rx parameter RAM page */
2890	ugeth->rx_glbl_pram_offset =
2891	    qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
2892			   UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
2893	if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
2894		ugeth_err
2895		    ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
2896		     __FUNCTION__);
2897		ucc_geth_memclean(ugeth);
2898		return -ENOMEM;
2899	}
2900	ugeth->p_rx_glbl_pram =
2901	    (struct ucc_geth_rx_global_pram *) qe_muram_addr(ugeth->
2902							rx_glbl_pram_offset);
2903	/* Zero out p_rx_glbl_pram */
2904	memset(ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
2905
2906	/* Fill global PRAM */
2907
2908	/* RQPTR */
2909	/* Size varies with number of Rx threads */
2910	ugeth->thread_dat_rx_offset =
2911	    qe_muram_alloc(numThreadsRxNumerical *
2912			   sizeof(struct ucc_geth_thread_data_rx),
2913			   UCC_GETH_THREAD_DATA_ALIGNMENT);
2914	if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) {
2915		ugeth_err
2916		    ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
2917		     __FUNCTION__);
2918		ucc_geth_memclean(ugeth);
2919		return -ENOMEM;
2920	}
2921
2922	ugeth->p_thread_data_rx =
2923	    (struct ucc_geth_thread_data_rx *) qe_muram_addr(ugeth->
2924							thread_dat_rx_offset);
2925	out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
2926
2927	/* typeorlen */
2928	out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
2929
2930	/* rxrmonbaseptr (statistics) */
2931	if (ug_info->
2932	    statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
2933		ugeth->rx_fw_statistics_pram_offset =
2934		    qe_muram_alloc(sizeof
2935				   (struct ucc_geth_rx_firmware_statistics_pram),
2936				   UCC_GETH_RX_STATISTICS_ALIGNMENT);
2937		if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) {
2938			ugeth_err
2939				("%s: Can not allocate DPRAM memory for"
2940				" p_rx_fw_statistics_pram.", __FUNCTION__);
2941			ucc_geth_memclean(ugeth);
2942			return -ENOMEM;
2943		}
2944		ugeth->p_rx_fw_statistics_pram =
2945		    (struct ucc_geth_rx_firmware_statistics_pram *)
2946		    qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
2947		/* Zero out p_rx_fw_statistics_pram */
2948		memset(ugeth->p_rx_fw_statistics_pram, 0,
2949		       sizeof(struct ucc_geth_rx_firmware_statistics_pram));
2950	}
2951
2952	/* intCoalescingPtr */
2953
2954	/* Size varies with number of Rx queues */
2955	ugeth->rx_irq_coalescing_tbl_offset =
2956	    qe_muram_alloc(ug_info->numQueuesRx *
2957			   sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
2958			   + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
2959	if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
2960		ugeth_err
2961		    ("%s: Can not allocate DPRAM memory for"
2962			" p_rx_irq_coalescing_tbl.", __FUNCTION__);
2963		ucc_geth_memclean(ugeth);
2964		return -ENOMEM;
2965	}
2966
2967	ugeth->p_rx_irq_coalescing_tbl =
2968	    (struct ucc_geth_rx_interrupt_coalescing_table *)
2969	    qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
2970	out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
2971		 ugeth->rx_irq_coalescing_tbl_offset);
2972
2973	/* Fill interrupt coalescing table */
2974	for (i = 0; i < ug_info->numQueuesRx; i++) {
2975		out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
2976			 interruptcoalescingmaxvalue,
2977			 ug_info->interruptcoalescingmaxvalue[i]);
2978		out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
2979			 interruptcoalescingcounter,
2980			 ug_info->interruptcoalescingmaxvalue[i]);
2981	}
2982
2983	/* MRBLR */
2984	init_max_rx_buff_len(uf_info->max_rx_buf_length,
2985			     &ugeth->p_rx_glbl_pram->mrblr);
2986	/* MFLR */
2987	out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
2988	/* MINFLR */
2989	init_min_frame_len(ug_info->minFrameLength,
2990			   &ugeth->p_rx_glbl_pram->minflr,
2991			   &ugeth->p_rx_glbl_pram->mrblr);
2992	/* MAXD1 */
2993	out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
2994	/* MAXD2 */
2995	out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
2996
2997	/* l2qt */
2998	l2qt = 0;
2999	for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
3000		l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
3001	out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
3002
3003	/* l3qt */
3004	for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
3005		l3qt = 0;
3006		for (i = 0; i < 8; i++)
3007			l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
3008		out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt);
3009	}
3010
3011	/* vlantype */
3012	out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
3013
3014	/* vlantci */
3015	out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
3016
3017	/* ecamptr */
3018	out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
3019
3020	/* RBDQPTR */
3021	/* Size varies with number of Rx queues */
3022	ugeth->rx_bd_qs_tbl_offset =
3023	    qe_muram_alloc(ug_info->numQueuesRx *
3024			   (sizeof(struct ucc_geth_rx_bd_queues_entry) +
3025			    sizeof(struct ucc_geth_rx_prefetched_bds)),
3026			   UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
3027	if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) {
3028		ugeth_err
3029		    ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
3030		     __FUNCTION__);
3031		ucc_geth_memclean(ugeth);
3032		return -ENOMEM;
3033	}
3034
3035	ugeth->p_rx_bd_qs_tbl =
3036	    (struct ucc_geth_rx_bd_queues_entry *) qe_muram_addr(ugeth->
3037				    rx_bd_qs_tbl_offset);
3038	out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
3039	/* Zero out p_rx_bd_qs_tbl */
3040	memset(ugeth->p_rx_bd_qs_tbl,
3041	       0,
3042	       ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
3043				       sizeof(struct ucc_geth_rx_prefetched_bds)));
3044
3045	/* Setup the table */
3046	/* Assume BD rings are already established */
3047	for (i = 0; i < ug_info->numQueuesRx; i++) {
3048		if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
3049			out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
3050				 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
3051		} else if (ugeth->ug_info->uf_info.bd_mem_part ==
3052			   MEM_PART_MURAM) {
3053			out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
3054				 (u32) immrbar_virt_to_phys(ugeth->
3055							    p_rx_bd_ring[i]));
3056		}
3057		/* rest of fields handled by QE */
3058	}
3059
3060	/* remoder */
3061	/* Already has speed set */
3062
3063	if (ugeth->rx_extended_features)
3064		remoder |= REMODER_RX_EXTENDED_FEATURES;
3065	if (ug_info->rxExtendedFiltering)
3066		remoder |= REMODER_RX_EXTENDED_FILTERING;
3067	if (ug_info->dynamicMaxFrameLength)
3068		remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
3069	if (ug_info->dynamicMinFrameLength)
3070		remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
3071	remoder |=
3072	    ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
3073	remoder |=
3074	    ug_info->
3075	    vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
3076	remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
3077	remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
3078	if (ug_info->ipCheckSumCheck)
3079		remoder |= REMODER_IP_CHECKSUM_CHECK;
3080	if (ug_info->ipAddressAlignment)
3081		remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
3082	out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
3083
3084	/* Note that this function must be called */
3085	/* ONLY AFTER p_tx_fw_statistics_pram */
3086	/* andp_UccGethRxFirmwareStatisticsPram are allocated ! */
3087	init_firmware_statistics_gathering_mode((ug_info->
3088		statisticsMode &
3089		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
3090		(ug_info->statisticsMode &
3091		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
3092		&ugeth->p_tx_glbl_pram->txrmonbaseptr,
3093		ugeth->tx_fw_statistics_pram_offset,
3094		&ugeth->p_rx_glbl_pram->rxrmonbaseptr,
3095		ugeth->rx_fw_statistics_pram_offset,
3096		&ugeth->p_tx_glbl_pram->temoder,
3097		&ugeth->p_rx_glbl_pram->remoder);
3098
3099	/* function code register */
3100	ugeth->p_rx_glbl_pram->rstate = function_code;
3101
3102	/* initialize extended filtering */
3103	if (ug_info->rxExtendedFiltering) {
3104		if (!ug_info->extendedFilteringChainPointer) {
3105			ugeth_err("%s: Null Extended Filtering Chain Pointer.",
3106				  __FUNCTION__);
3107			ucc_geth_memclean(ugeth);
3108			return -EINVAL;
3109		}
3110
3111		/* Allocate memory for extended filtering Mode Global
3112		Parameters */
3113		ugeth->exf_glbl_param_offset =
3114		    qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
3115		UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
3116		if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) {
3117			ugeth_err
3118				("%s: Can not allocate DPRAM memory for"
3119				" p_exf_glbl_param.", __FUNCTION__);
3120			ucc_geth_memclean(ugeth);
3121			return -ENOMEM;
3122		}
3123
3124		ugeth->p_exf_glbl_param =
3125		    (struct ucc_geth_exf_global_pram *) qe_muram_addr(ugeth->
3126				 exf_glbl_param_offset);
3127		out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
3128			 ugeth->exf_glbl_param_offset);
3129		out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
3130			 (u32) ug_info->extendedFilteringChainPointer);
3131
3132	} else {		/* initialize 82xx style address filtering */
3133
3134		/* Init individual address recognition registers to disabled */
3135
3136		for (j = 0; j < NUM_OF_PADDRS; j++)
3137			ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
3138
3139		p_82xx_addr_filt =
3140		    (struct ucc_geth_82xx_address_filtering_pram *) ugeth->
3141		    p_rx_glbl_pram->addressfiltering;
3142
3143		ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3144			ENET_ADDR_TYPE_GROUP);
3145		ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3146			ENET_ADDR_TYPE_INDIVIDUAL);
3147	}
3148
3149	/*
3150	 * Initialize UCC at QE level
3151	 */
3152
3153	command = QE_INIT_TX_RX;
3154
3155	/* Allocate shadow InitEnet command parameter structure.
3156	 * This is needed because after the InitEnet command is executed,
3157	 * the structure in DPRAM is released, because DPRAM is a premium
3158	 * resource.
3159	 * This shadow structure keeps a copy of what was done so that the
3160	 * allocated resources can be released when the channel is freed.
3161	 */
3162	if (!(ugeth->p_init_enet_param_shadow =
3163	      kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
3164		ugeth_err
3165		    ("%s: Can not allocate memory for"
3166			" p_UccInitEnetParamShadows.", __FUNCTION__);
3167		ucc_geth_memclean(ugeth);
3168		return -ENOMEM;
3169	}
3170	/* Zero out *p_init_enet_param_shadow */
3171	memset((char *)ugeth->p_init_enet_param_shadow,
3172	       0, sizeof(struct ucc_geth_init_pram));
3173
3174	/* Fill shadow InitEnet command parameter structure */
3175
3176	ugeth->p_init_enet_param_shadow->resinit1 =
3177	    ENET_INIT_PARAM_MAGIC_RES_INIT1;
3178	ugeth->p_init_enet_param_shadow->resinit2 =
3179	    ENET_INIT_PARAM_MAGIC_RES_INIT2;
3180	ugeth->p_init_enet_param_shadow->resinit3 =
3181	    ENET_INIT_PARAM_MAGIC_RES_INIT3;
3182	ugeth->p_init_enet_param_shadow->resinit4 =
3183	    ENET_INIT_PARAM_MAGIC_RES_INIT4;
3184	ugeth->p_init_enet_param_shadow->resinit5 =
3185	    ENET_INIT_PARAM_MAGIC_RES_INIT5;
3186	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3187	    ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
3188	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3189	    ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
3190
3191	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3192	    ugeth->rx_glbl_pram_offset | ug_info->riscRx;
3193	if ((ug_info->largestexternallookupkeysize !=
3194	     QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE)
3195	    && (ug_info->largestexternallookupkeysize !=
3196		QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3197	    && (ug_info->largestexternallookupkeysize !=
3198		QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
3199		ugeth_err("%s: Invalid largest External Lookup Key Size.",
3200			  __FUNCTION__);
3201		ucc_geth_memclean(ugeth);
3202		return -EINVAL;
3203	}
3204	ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
3205	    ug_info->largestexternallookupkeysize;
3206	size = sizeof(struct ucc_geth_thread_rx_pram);
3207	if (ug_info->rxExtendedFiltering) {
3208		size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
3209		if (ug_info->largestexternallookupkeysize ==
3210		    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3211			size +=
3212			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
3213		if (ug_info->largestexternallookupkeysize ==
3214		    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
3215			size +=
3216			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
3217	}
3218
3219	if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
3220		p_init_enet_param_shadow->rxthread[0]),
3221		(u8) (numThreadsRxNumerical + 1)
3222		/* Rx needs one extra for terminator */
3223		, size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
3224		ug_info->riscRx, 1)) != 0) {
3225			ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3226				__FUNCTION__);
3227		ucc_geth_memclean(ugeth);
3228		return ret_val;
3229	}
3230
3231	ugeth->p_init_enet_param_shadow->txglobal =
3232	    ugeth->tx_glbl_pram_offset | ug_info->riscTx;
3233	if ((ret_val =
3234	     fill_init_enet_entries(ugeth,
3235				    &(ugeth->p_init_enet_param_shadow->
3236				      txthread[0]), numThreadsTxNumerical,
3237				    sizeof(struct ucc_geth_thread_tx_pram),
3238				    UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
3239				    ug_info->riscTx, 0)) != 0) {
3240		ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3241			  __FUNCTION__);
3242		ucc_geth_memclean(ugeth);
3243		return ret_val;
3244	}
3245
3246	/* Load Rx bds with buffers */
3247	for (i = 0; i < ug_info->numQueuesRx; i++) {
3248		if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
3249			ugeth_err("%s: Can not fill Rx bds with buffers.",
3250				  __FUNCTION__);
3251			ucc_geth_memclean(ugeth);
3252			return ret_val;
3253		}
3254	}
3255
3256	/* Allocate InitEnet command parameter structure */
3257	init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
3258	if (IS_ERR_VALUE(init_enet_pram_offset)) {
3259		ugeth_err
3260		    ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
3261		     __FUNCTION__);
3262		ucc_geth_memclean(ugeth);
3263		return -ENOMEM;
3264	}
3265	p_init_enet_pram =
3266	    (struct ucc_geth_init_pram *) qe_muram_addr(init_enet_pram_offset);
3267
3268	/* Copy shadow InitEnet command parameter structure into PRAM */
3269	p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1;
3270	p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2;
3271	p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3;
3272	p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4;
3273	out_be16(&p_init_enet_pram->resinit5,
3274		 ugeth->p_init_enet_param_shadow->resinit5);
3275	p_init_enet_pram->largestexternallookupkeysize =
3276	    ugeth->p_init_enet_param_shadow->largestexternallookupkeysize;
3277	out_be32(&p_init_enet_pram->rgftgfrxglobal,
3278		 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
3279	for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
3280		out_be32(&p_init_enet_pram->rxthread[i],
3281			 ugeth->p_init_enet_param_shadow->rxthread[i]);
3282	out_be32(&p_init_enet_pram->txglobal,
3283		 ugeth->p_init_enet_param_shadow->txglobal);
3284	for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
3285		out_be32(&p_init_enet_pram->txthread[i],
3286			 ugeth->p_init_enet_param_shadow->txthread[i]);
3287
3288	/* Issue QE command */
3289	cecr_subblock =
3290	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
3291	qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
3292		     init_enet_pram_offset);
3293
3294	/* Free InitEnet command parameter */
3295	qe_muram_free(init_enet_pram_offset);
3296
3297	return 0;
3298}
3299
3300/* returns a net_device_stats structure pointer */
3301static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev)
3302{
3303	struct ucc_geth_private *ugeth = netdev_priv(dev);
3304
3305	return &(ugeth->stats);
3306}
3307
3308/* ucc_geth_timeout gets called when a packet has not been
3309 * transmitted after a set amount of time.
3310 * For now, assume that clearing out all the structures, and
3311 * starting over will fix the problem. */
3312static void ucc_geth_timeout(struct net_device *dev)
3313{
3314	struct ucc_geth_private *ugeth = netdev_priv(dev);
3315
3316	ugeth_vdbg("%s: IN", __FUNCTION__);
3317
3318	ugeth->stats.tx_errors++;
3319
3320	ugeth_dump_regs(ugeth);
3321
3322	if (dev->flags & IFF_UP) {
3323		ucc_geth_stop(ugeth);
3324		ucc_geth_startup(ugeth);
3325	}
3326
3327	netif_schedule(dev);
3328}
3329
3330/* This is called by the kernel when a frame is ready for transmission. */
3331/* It is pointed to by the dev->hard_start_xmit function pointer */
3332static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3333{
3334	struct ucc_geth_private *ugeth = netdev_priv(dev);
3335#ifdef CONFIG_UGETH_TX_ON_DEMAND
3336	struct ucc_fast_private *uccf;
3337#endif
3338	u8 *bd;			/* BD pointer */
3339	u32 bd_status;
3340	u8 txQ = 0;
3341
3342	ugeth_vdbg("%s: IN", __FUNCTION__);
3343
3344	spin_lock_irq(&ugeth->lock);
3345
3346	ugeth->stats.tx_bytes += skb->len;
3347
3348	/* Start from the next BD that should be filled */
3349	bd = ugeth->txBd[txQ];
3350	bd_status = in_be32((u32 *)bd);
3351	/* Save the skb pointer so we can free it later */
3352	ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
3353
3354	/* Update the current skb pointer (wrapping if this was the last) */
3355	ugeth->skb_curtx[txQ] =
3356	    (ugeth->skb_curtx[txQ] +
3357	     1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3358
3359	/* set up the buffer descriptor */
3360	out_be32(&((struct qe_bd *)bd)->buf,
3361		      dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
3362
3363	/* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
3364
3365	bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
3366
3367	/* set bd status and length */
3368	out_be32((u32 *)bd, bd_status);
3369
3370	dev->trans_start = jiffies;
3371
3372	/* Move to next BD in the ring */
3373	if (!(bd_status & T_W))
3374		bd += sizeof(struct qe_bd);
3375	else
3376		bd = ugeth->p_tx_bd_ring[txQ];
3377
3378	/* If the next BD still needs to be cleaned up, then the bds
3379	   are full.  We need to tell the kernel to stop sending us stuff. */
3380	if (bd == ugeth->confBd[txQ]) {
3381		if (!netif_queue_stopped(dev))
3382			netif_stop_queue(dev);
3383	}
3384
3385	ugeth->txBd[txQ] = bd;
3386
3387	if (ugeth->p_scheduler) {
3388		ugeth->cpucount[txQ]++;
3389		/* Indicate to QE that there are more Tx bds ready for
3390		transmission */
3391		/* This is done by writing a running counter of the bd
3392		count to the scheduler PRAM. */
3393		out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
3394	}
3395
3396#ifdef CONFIG_UGETH_TX_ON_DEMAND
3397	uccf = ugeth->uccf;
3398	out_be16(uccf->p_utodr, UCC_FAST_TOD);
3399#endif
3400	spin_unlock_irq(&ugeth->lock);
3401
3402	return 0;
3403}
3404
3405static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
3406{
3407	struct sk_buff *skb;
3408	u8 *bd;
3409	u16 length, howmany = 0;
3410	u32 bd_status;
3411	u8 *bdBuffer;
3412
3413	ugeth_vdbg("%s: IN", __FUNCTION__);
3414
3415	/* collect received buffers */
3416	bd = ugeth->rxBd[rxQ];
3417
3418	bd_status = in_be32((u32 *)bd);
3419
3420	/* while there are received buffers and BD is full (~R_E) */
3421	while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
3422		bdBuffer = (u8 *) in_be32(&((struct qe_bd *)bd)->buf);
3423		length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
3424		skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
3425
3426		/* determine whether buffer is first, last, first and last
3427		(single buffer frame) or middle (not first and not last) */
3428		if (!skb ||
3429		    (!(bd_status & (R_F | R_L))) ||
3430		    (bd_status & R_ERRORS_FATAL)) {
3431			ugeth_vdbg("%s, %d: ERROR!!! skb - 0x%08x",
3432				   __FUNCTION__, __LINE__, (u32) skb);
3433			if (skb)
3434				dev_kfree_skb_any(skb);
3435
3436			ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
3437			ugeth->stats.rx_dropped++;
3438		} else {
3439			ugeth->stats.rx_packets++;
3440			howmany++;
3441
3442			/* Prep the skb for the packet */
3443			skb_put(skb, length);
3444
3445			/* Tell the skb what kind of packet this is */
3446			skb->protocol = eth_type_trans(skb, ugeth->dev);
3447
3448			ugeth->stats.rx_bytes += length;
3449			/* Send the packet up the stack */
3450#ifdef CONFIG_UGETH_NAPI
3451			netif_receive_skb(skb);
3452#else
3453			netif_rx(skb);
3454#endif				/* CONFIG_UGETH_NAPI */
3455		}
3456
3457		ugeth->dev->last_rx = jiffies;
3458
3459		skb = get_new_skb(ugeth, bd);
3460		if (!skb) {
3461			ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__);
3462			ugeth->stats.rx_dropped++;
3463			break;
3464		}
3465
3466		ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
3467
3468		/* update to point at the next skb */
3469		ugeth->skb_currx[rxQ] =
3470		    (ugeth->skb_currx[rxQ] +
3471		     1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
3472
3473		if (bd_status & R_W)
3474			bd = ugeth->p_rx_bd_ring[rxQ];
3475		else
3476			bd += sizeof(struct qe_bd);
3477
3478		bd_status = in_be32((u32 *)bd);
3479	}
3480
3481	ugeth->rxBd[rxQ] = bd;
3482	return howmany;
3483}
3484
3485static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3486{
3487	/* Start from the next BD that should be filled */
3488	struct ucc_geth_private *ugeth = netdev_priv(dev);
3489	u8 *bd;			/* BD pointer */
3490	u32 bd_status;
3491
3492	bd = ugeth->confBd[txQ];
3493	bd_status = in_be32((u32 *)bd);
3494
3495	/* Normal processing. */
3496	while ((bd_status & T_R) == 0) {
3497		/* BD contains already transmitted buffer.   */
3498		/* Handle the transmitted buffer and release */
3499		/* the BD to be used with the current frame  */
3500
3501		if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
3502			break;
3503
3504		ugeth->stats.tx_packets++;
3505
3506		/* Free the sk buffer associated with this TxBD */
3507		dev_kfree_skb_irq(ugeth->
3508				  tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
3509		ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
3510		ugeth->skb_dirtytx[txQ] =
3511		    (ugeth->skb_dirtytx[txQ] +
3512		     1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3513
3514		/* We freed a buffer, so now we can restart transmission */
3515		if (netif_queue_stopped(dev))
3516			netif_wake_queue(dev);
3517
3518		/* Advance the confirmation BD pointer */
3519		if (!(bd_status & T_W))
3520			bd += sizeof(struct qe_bd);
3521		else
3522			bd = ugeth->p_tx_bd_ring[txQ];
3523		bd_status = in_be32((u32 *)bd);
3524	}
3525	ugeth->confBd[txQ] = bd;
3526	return 0;
3527}
3528
3529#ifdef CONFIG_UGETH_NAPI
3530static int ucc_geth_poll(struct net_device *dev, int *budget)
3531{
3532	struct ucc_geth_private *ugeth = netdev_priv(dev);
3533	struct ucc_geth_info *ug_info;
3534	struct ucc_fast_private *uccf;
3535	int howmany;
3536	u8 i;
3537	int rx_work_limit;
3538	register u32 uccm;
3539
3540	ug_info = ugeth->ug_info;
3541
3542	rx_work_limit = *budget;
3543	if (rx_work_limit > dev->quota)
3544		rx_work_limit = dev->quota;
3545
3546	howmany = 0;
3547
3548	for (i = 0; i < ug_info->numQueuesRx; i++) {
3549		howmany += ucc_geth_rx(ugeth, i, rx_work_limit);
3550	}
3551
3552	dev->quota -= howmany;
3553	rx_work_limit -= howmany;
3554	*budget -= howmany;
3555
3556	if (rx_work_limit > 0) {
3557		netif_rx_complete(dev);
3558		uccf = ugeth->uccf;
3559		uccm = in_be32(uccf->p_uccm);
3560		uccm |= UCCE_RX_EVENTS;
3561		out_be32(uccf->p_uccm, uccm);
3562	}
3563
3564	return (rx_work_limit > 0) ? 0 : 1;
3565}
3566#endif				/* CONFIG_UGETH_NAPI */
3567
3568static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
3569{
3570	struct net_device *dev = (struct net_device *)info;
3571	struct ucc_geth_private *ugeth = netdev_priv(dev);
3572	struct ucc_fast_private *uccf;
3573	struct ucc_geth_info *ug_info;
3574	register u32 ucce;
3575	register u32 uccm;
3576#ifndef CONFIG_UGETH_NAPI
3577	register u32 rx_mask;
3578#endif
3579	register u32 tx_mask;
3580	u8 i;
3581
3582	ugeth_vdbg("%s: IN", __FUNCTION__);
3583
3584	if (!ugeth)
3585		return IRQ_NONE;
3586
3587	uccf = ugeth->uccf;
3588	ug_info = ugeth->ug_info;
3589
3590	/* read and clear events */
3591	ucce = (u32) in_be32(uccf->p_ucce);
3592	uccm = (u32) in_be32(uccf->p_uccm);
3593	ucce &= uccm;
3594	out_be32(uccf->p_ucce, ucce);
3595
3596	/* check for receive events that require processing */
3597	if (ucce & UCCE_RX_EVENTS) {
3598#ifdef CONFIG_UGETH_NAPI
3599		if (netif_rx_schedule_prep(dev)) {
3600		uccm &= ~UCCE_RX_EVENTS;
3601			out_be32(uccf->p_uccm, uccm);
3602			__netif_rx_schedule(dev);
3603		}
3604#else
3605		rx_mask = UCCE_RXBF_SINGLE_MASK;
3606		for (i = 0; i < ug_info->numQueuesRx; i++) {
3607			if (ucce & rx_mask)
3608				ucc_geth_rx(ugeth, i, (int)ugeth->ug_info->bdRingLenRx[i]);
3609			ucce &= ~rx_mask;
3610			rx_mask <<= 1;
3611		}
3612#endif /* CONFIG_UGETH_NAPI */
3613	}
3614
3615	/* Tx event processing */
3616	if (ucce & UCCE_TX_EVENTS) {
3617		spin_lock(&ugeth->lock);
3618		tx_mask = UCCE_TXBF_SINGLE_MASK;
3619		for (i = 0; i < ug_info->numQueuesTx; i++) {
3620			if (ucce & tx_mask)
3621				ucc_geth_tx(dev, i);
3622			ucce &= ~tx_mask;
3623			tx_mask <<= 1;
3624		}
3625		spin_unlock(&ugeth->lock);
3626	}
3627
3628	/* Errors and other events */
3629	if (ucce & UCCE_OTHER) {
3630		if (ucce & UCCE_BSY) {
3631			ugeth->stats.rx_errors++;
3632		}
3633		if (ucce & UCCE_TXE) {
3634			ugeth->stats.tx_errors++;
3635		}
3636	}
3637
3638	return IRQ_HANDLED;
3639}
3640
3641/* Called when something needs to use the ethernet device */
3642/* Returns 0 for success. */
3643static int ucc_geth_open(struct net_device *dev)
3644{
3645	struct ucc_geth_private *ugeth = netdev_priv(dev);
3646	int err;
3647
3648	ugeth_vdbg("%s: IN", __FUNCTION__);
3649
3650	/* Test station address */
3651	if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
3652		ugeth_err("%s: Multicast address used for station address"
3653			  " - is this what you wanted?", __FUNCTION__);
3654		return -EINVAL;
3655	}
3656
3657	err = ucc_struct_init(ugeth);
3658	if (err) {
3659		ugeth_err("%s: Cannot configure internal struct, aborting.", dev->name);
3660		return err;
3661	}
3662
3663	err = ucc_geth_startup(ugeth);
3664	if (err) {
3665		ugeth_err("%s: Cannot configure net device, aborting.",
3666			  dev->name);
3667		return err;
3668	}
3669
3670	err = adjust_enet_interface(ugeth);
3671	if (err) {
3672		ugeth_err("%s: Cannot configure net device, aborting.",
3673			  dev->name);
3674		return err;
3675	}
3676
3677	/*       Set MACSTNADDR1, MACSTNADDR2                */
3678	/* For more details see the hardware spec.           */
3679	init_mac_station_addr_regs(dev->dev_addr[0],
3680				   dev->dev_addr[1],
3681				   dev->dev_addr[2],
3682				   dev->dev_addr[3],
3683				   dev->dev_addr[4],
3684				   dev->dev_addr[5],
3685				   &ugeth->ug_regs->macstnaddr1,
3686				   &ugeth->ug_regs->macstnaddr2);
3687
3688	err = init_phy(dev);
3689	if (err) {
3690		ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
3691		return err;
3692	}
3693
3694	phy_start(ugeth->phydev);
3695
3696	err =
3697	    request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0,
3698			"UCC Geth", dev);
3699	if (err) {
3700		ugeth_err("%s: Cannot get IRQ for net device, aborting.",
3701			  dev->name);
3702		ucc_geth_stop(ugeth);
3703		return err;
3704	}
3705
3706	err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
3707	if (err) {
3708		ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
3709		ucc_geth_stop(ugeth);
3710		return err;
3711	}
3712
3713	netif_start_queue(dev);
3714
3715	return err;
3716}
3717
3718/* Stops the kernel queue, and halts the controller */
3719static int ucc_geth_close(struct net_device *dev)
3720{
3721	struct ucc_geth_private *ugeth = netdev_priv(dev);
3722
3723	ugeth_vdbg("%s: IN", __FUNCTION__);
3724
3725	ucc_geth_stop(ugeth);
3726
3727	phy_disconnect(ugeth->phydev);
3728	ugeth->phydev = NULL;
3729
3730	netif_stop_queue(dev);
3731
3732	return 0;
3733}
3734
3735const struct ethtool_ops ucc_geth_ethtool_ops = { };
3736
3737static phy_interface_t to_phy_interface(const char *phy_connection_type)
3738{
3739	if (strcasecmp(phy_connection_type, "mii") == 0)
3740		return PHY_INTERFACE_MODE_MII;
3741	if (strcasecmp(phy_connection_type, "gmii") == 0)
3742		return PHY_INTERFACE_MODE_GMII;
3743	if (strcasecmp(phy_connection_type, "tbi") == 0)
3744		return PHY_INTERFACE_MODE_TBI;
3745	if (strcasecmp(phy_connection_type, "rmii") == 0)
3746		return PHY_INTERFACE_MODE_RMII;
3747	if (strcasecmp(phy_connection_type, "rgmii") == 0)
3748		return PHY_INTERFACE_MODE_RGMII;
3749	if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
3750		return PHY_INTERFACE_MODE_RGMII_ID;
3751	if (strcasecmp(phy_connection_type, "rtbi") == 0)
3752		return PHY_INTERFACE_MODE_RTBI;
3753
3754	return PHY_INTERFACE_MODE_MII;
3755}
3756
3757static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match)
3758{
3759	struct device *device = &ofdev->dev;
3760	struct device_node *np = ofdev->node;
3761	struct device_node *mdio;
3762	struct net_device *dev = NULL;
3763	struct ucc_geth_private *ugeth = NULL;
3764	struct ucc_geth_info *ug_info;
3765	struct resource res;
3766	struct device_node *phy;
3767	int err, ucc_num, max_speed = 0;
3768	const phandle *ph;
3769	const unsigned int *prop;
3770	const void *mac_addr;
3771	phy_interface_t phy_interface;
3772	static const int enet_to_speed[] = {
3773		SPEED_10, SPEED_10, SPEED_10,
3774		SPEED_100, SPEED_100, SPEED_100,
3775		SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
3776	};
3777	static const phy_interface_t enet_to_phy_interface[] = {
3778		PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
3779		PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
3780		PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
3781		PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
3782		PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
3783	};
3784
3785	ugeth_vdbg("%s: IN", __FUNCTION__);
3786
3787	prop = of_get_property(np, "device-id", NULL);
3788	ucc_num = *prop - 1;
3789	if ((ucc_num < 0) || (ucc_num > 7))
3790		return -ENODEV;
3791
3792	ug_info = &ugeth_info[ucc_num];
3793	ug_info->uf_info.ucc_num = ucc_num;
3794
3795	prop = of_get_property(np, "rx-clock", NULL);
3796	ug_info->uf_info.rx_clock = *prop;
3797	prop = of_get_property(np, "tx-clock", NULL);
3798	ug_info->uf_info.tx_clock = *prop;
3799	err = of_address_to_resource(np, 0, &res);
3800	if (err)
3801		return -EINVAL;
3802
3803	ug_info->uf_info.regs = res.start;
3804	ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
3805
3806	ph = of_get_property(np, "phy-handle", NULL);
3807	phy = of_find_node_by_phandle(*ph);
3808
3809	if (phy == NULL)
3810		return -ENODEV;
3811
3812	/* set the PHY address */
3813	prop = of_get_property(phy, "reg", NULL);
3814	if (prop == NULL)
3815		return -1;
3816	ug_info->phy_address = *prop;
3817
3818	/* get the phy interface type, or default to MII */
3819	prop = of_get_property(np, "phy-connection-type", NULL);
3820	if (!prop) {
3821		/* handle interface property present in old trees */
3822		prop = of_get_property(phy, "interface", NULL);
3823		if (prop != NULL) {
3824			phy_interface = enet_to_phy_interface[*prop];
3825			max_speed = enet_to_speed[*prop];
3826		} else
3827			phy_interface = PHY_INTERFACE_MODE_MII;
3828	} else {
3829		phy_interface = to_phy_interface((const char *)prop);
3830	}
3831
3832	/* get speed, or derive from PHY interface */
3833	if (max_speed == 0)
3834		switch (phy_interface) {
3835		case PHY_INTERFACE_MODE_GMII:
3836		case PHY_INTERFACE_MODE_RGMII:
3837		case PHY_INTERFACE_MODE_RGMII_ID:
3838		case PHY_INTERFACE_MODE_TBI:
3839		case PHY_INTERFACE_MODE_RTBI:
3840			max_speed = SPEED_1000;
3841			break;
3842		default:
3843			max_speed = SPEED_100;
3844			break;
3845		}
3846
3847	if (max_speed == SPEED_1000) {
3848		/* configure muram FIFOs for gigabit operation */
3849		ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
3850		ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
3851		ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
3852		ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT;
3853		ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
3854		ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
3855	}
3856
3857	/* Set the bus id */
3858	mdio = of_get_parent(phy);
3859
3860	if (mdio == NULL)
3861		return -1;
3862
3863	err = of_address_to_resource(mdio, 0, &res);
3864	of_node_put(mdio);
3865
3866	if (err)
3867		return -1;
3868
3869	ug_info->mdio_bus = res.start;
3870
3871	printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n",
3872		ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
3873		ug_info->uf_info.irq);
3874
3875	if (ug_info == NULL) {
3876		ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__,
3877			  ucc_num);
3878		return -ENODEV;
3879	}
3880
3881	/* Create an ethernet device instance */
3882	dev = alloc_etherdev(sizeof(*ugeth));
3883
3884	if (dev == NULL)
3885		return -ENOMEM;
3886
3887	ugeth = netdev_priv(dev);
3888	spin_lock_init(&ugeth->lock);
3889
3890	dev_set_drvdata(device, dev);
3891
3892	/* Set the dev->base_addr to the gfar reg region */
3893	dev->base_addr = (unsigned long)(ug_info->uf_info.regs);
3894
3895	SET_MODULE_OWNER(dev);
3896	SET_NETDEV_DEV(dev, device);
3897
3898	/* Fill in the dev structure */
3899	dev->open = ucc_geth_open;
3900	dev->hard_start_xmit = ucc_geth_start_xmit;
3901	dev->tx_timeout = ucc_geth_timeout;
3902	dev->watchdog_timeo = TX_TIMEOUT;
3903#ifdef CONFIG_UGETH_NAPI
3904	dev->poll = ucc_geth_poll;
3905	dev->weight = UCC_GETH_DEV_WEIGHT;
3906#endif				/* CONFIG_UGETH_NAPI */
3907	dev->stop = ucc_geth_close;
3908	dev->get_stats = ucc_geth_get_stats;
3909//    dev->change_mtu = ucc_geth_change_mtu;
3910	dev->mtu = 1500;
3911	dev->set_multicast_list = ucc_geth_set_multi;
3912	dev->ethtool_ops = &ucc_geth_ethtool_ops;
3913
3914	ugeth->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
3915	ugeth->phy_interface = phy_interface;
3916	ugeth->max_speed = max_speed;
3917
3918	err = register_netdev(dev);
3919	if (err) {
3920		ugeth_err("%s: Cannot register net device, aborting.",
3921			  dev->name);
3922		free_netdev(dev);
3923		return err;
3924	}
3925
3926	mac_addr = of_get_mac_address(np);
3927	if (mac_addr)
3928		memcpy(dev->dev_addr, mac_addr, 6);
3929
3930	ugeth->ug_info = ug_info;
3931	ugeth->dev = dev;
3932
3933	return 0;
3934}
3935
3936static int ucc_geth_remove(struct of_device* ofdev)
3937{
3938	struct device *device = &ofdev->dev;
3939	struct net_device *dev = dev_get_drvdata(device);
3940	struct ucc_geth_private *ugeth = netdev_priv(dev);
3941
3942	dev_set_drvdata(device, NULL);
3943	ucc_geth_memclean(ugeth);
3944	free_netdev(dev);
3945
3946	return 0;
3947}
3948
3949static struct of_device_id ucc_geth_match[] = {
3950	{
3951		.type = "network",
3952		.compatible = "ucc_geth",
3953	},
3954	{},
3955};
3956
3957MODULE_DEVICE_TABLE(of, ucc_geth_match);
3958
3959static struct of_platform_driver ucc_geth_driver = {
3960	.name		= DRV_NAME,
3961	.match_table	= ucc_geth_match,
3962	.probe		= ucc_geth_probe,
3963	.remove		= ucc_geth_remove,
3964};
3965
3966static int __init ucc_geth_init(void)
3967{
3968	int i, ret;
3969
3970	ret = uec_mdio_init();
3971
3972	if (ret)
3973		return ret;
3974
3975	printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
3976	for (i = 0; i < 8; i++)
3977		memcpy(&(ugeth_info[i]), &ugeth_primary_info,
3978		       sizeof(ugeth_primary_info));
3979
3980	ret = of_register_platform_driver(&ucc_geth_driver);
3981
3982	if (ret)
3983		uec_mdio_exit();
3984
3985	return ret;
3986}
3987
3988static void __exit ucc_geth_exit(void)
3989{
3990	of_unregister_platform_driver(&ucc_geth_driver);
3991	uec_mdio_exit();
3992}
3993
3994module_init(ucc_geth_init);
3995module_exit(ucc_geth_exit);
3996
3997MODULE_AUTHOR("Freescale Semiconductor, Inc");
3998MODULE_DESCRIPTION(DRV_DESC);
3999MODULE_VERSION(DRV_VERSION);
4000MODULE_LICENSE("GPL");
4001