/*
 * Northstar AMAC Ethernet driver
 *
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/ioport.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>

#undef	_EXTRA_DEBUG

#define	AMAC_MAX_PACKET	(ETH_FRAME_LEN+ETH_FCS_LEN+2*VLAN_HLEN)
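
/* For reference (standard kernel constants): ETH_FRAME_LEN is 1514,
 * ETH_FCS_LEN is 4 and VLAN_HLEN is 4, so this evaluates to 1526 bytes,
 * i.e. one full frame with room for two stacked VLAN tags. */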


/*
 * RESOURCES
 */
static struct resource amac_regs[4] = {
	{
	.name	= "amac0regs", .flags	= IORESOURCE_MEM,
	.start	= 0x18024000, .end	= 0x18024FFF,
	},
	{
	.name	= "amac1regs", .flags	= IORESOURCE_MEM,
	.start	= 0x18025000, .end	= 0x18025FFF,
	},
	{
	.name	= "amac2regs", .flags	= IORESOURCE_MEM,
	.start	= 0x18026000, .end	= 0x18026FFF,
	},
	{
	.name	= "amac3regs", .flags	= IORESOURCE_MEM,
	.start	= 0x18027000, .end	= 0x18027FFF,
	},

};

static struct resource amac_irqs[4] = {
	{
	.name	= "amac0irq",	.flags	= IORESOURCE_IRQ,
	.start	= 179,	.end	= 179,
	},
	{
	.name	= "amac1irq",	.flags	= IORESOURCE_IRQ,
	.start	= 180,	.end	= 180,
	},
	{
	.name	= "amac2irq",	.flags	= IORESOURCE_IRQ,
	.start	= 181,	.end	= 181,
	},
	{
	.name	= "amac3irq",	.flags	= IORESOURCE_IRQ,
	.start	= 182,	.end	= 182,
	},
};

/*
 * REGISTERS
 *
 * Individual bit-fields of registers are specified here
 * for clarity, and the rest of the code accesses each field
 * as if it were its own register.
 *
 */
#define	REG_BIT_FIELD(r,p,w)	((reg_bit_field_t){(r),(p),(w)})
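
/*
 * Illustrative expansion: GMAC_CTL_FLOW_CNTL_MODE below becomes
 * ((reg_bit_field_t){0x0, 5, 2}), i.e. a 2-bit field at bit position 5
 * of the register at offset 0x0, so _reg_write(dev,
 * GMAC_CTL_FLOW_CNTL_MODE, 1) does a read-modify-write that touches
 * only bits 6:5 of that register.
 */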

#define	GMAC_CTL_TX_ARB_MODE		REG_BIT_FIELD(0x0, 0, 1)
#define	GMAC_CTL_RX_OVFLOW_MODE		REG_BIT_FIELD(0x0, 1, 1)
#define	GMAC_CTL_FLOW_CNTLSRC		REG_BIT_FIELD(0x0, 2, 1)
#define	GMAC_CTL_LINKSTAT_SEL		REG_BIT_FIELD(0x0, 3, 1)
#define	GMAC_CTL_MIB_RESET		REG_BIT_FIELD(0x0, 4, 1)
#define	GMAC_CTL_FLOW_CNTL_MODE		REG_BIT_FIELD(0x0, 5, 2)
#define	GMAC_CTL_NWAY_AUTO_POLL		REG_BIT_FIELD(0x0, 7, 1)
#define	GMAC_CTL_TX_FLUSH		REG_BIT_FIELD(0x0, 8, 1)
#define	GMAC_CTL_RXCLK_DMG		REG_BIT_FIELD(0x0, 16, 2)
#define	GMAC_CTL_TXCLK_DMG		REG_BIT_FIELD(0x0, 18, 2)
#define	GMAC_CTL_RXCLK_DLL		REG_BIT_FIELD(0x0, 20, 1)
#define	GMAC_CTL_TXCLK_DLL		REG_BIT_FIELD(0x0, 21, 1)

#define	GMAC_STAT			REG_BIT_FIELD(0x04, 0, 32)
#define	GMAC_STAT_RX_FIFO_FULL		REG_BIT_FIELD(0x04, 0, 1)
#define	GMAC_STAT_RX_DBUF_FULL		REG_BIT_FIELD(0x04, 1, 1)
#define	GMAC_STAT_RX_IBUF_FULL		REG_BIT_FIELD(0x04, 2, 1)
#define	GMAC_STAT_TX_FIFO_FULL		REG_BIT_FIELD(0x04, 3, 1)
#define	GMAC_STAT_TX_DBUF_FULL		REG_BIT_FIELD(0x04, 4, 1)
#define	GMAC_STAT_TX_IBUF_FULL		REG_BIT_FIELD(0x04, 5, 1)
#define	GMAC_STAT_TX_PAUSE		REG_BIT_FIELD(0x04, 6, 1)
#define	GMAC_STAT_TX_IF_MODE		REG_BIT_FIELD(0x04, 7, 2)
#define	GMAC_STAT_RX_Q_SIZE		REG_BIT_FIELD(0x04, 16, 4)
#define	GMAC_STAT_TX_Q_SIZE		REG_BIT_FIELD(0x04, 20, 4)

#define	GMAC_INTSTAT			REG_BIT_FIELD(0x020, 0, 32)
#define	GMAC_INTSTAT_MIB_RX_OVRUN	REG_BIT_FIELD(0x020, 0, 1)
#define	GMAC_INTSTAT_MIB_TX_OVRUN	REG_BIT_FIELD(0x020, 1, 1)
#define	GMAC_INTSTAT_TX_FLUSH_DONE	REG_BIT_FIELD(0x020, 2, 1)
#define	GMAC_INTSTAT_MII_LINK_CHANGE	REG_BIT_FIELD(0x020, 3, 1)
#define	GMAC_INTSTAT_MDIO_DONE		REG_BIT_FIELD(0x020, 4, 1)
#define	GMAC_INTSTAT_MIB_RX_HALF	REG_BIT_FIELD(0x020, 5, 1)
#define	GMAC_INTSTAT_MIB_TX_HALF	REG_BIT_FIELD(0x020, 6, 1)
#define	GMAC_INTSTAT_TIMER_INT		REG_BIT_FIELD(0x020, 7, 1)
#define	GMAC_INTSTAT_SW_LINK_CHANGE	REG_BIT_FIELD(0x020, 8, 1)
#define	GMAC_INTSTAT_DMA_DESC_ERR	REG_BIT_FIELD(0x020, 10, 1)
#define	GMAC_INTSTAT_DMA_DATA_ERR	REG_BIT_FIELD(0x020, 11, 1)
#define	GMAC_INTSTAT_DMA_PROTO_ERR	REG_BIT_FIELD(0x020, 12, 1)
#define	GMAC_INTSTAT_DMA_RX_UNDERFLOW	REG_BIT_FIELD(0x020, 13, 1)
#define	GMAC_INTSTAT_DMA_RX_OVERFLOW	REG_BIT_FIELD(0x020, 14, 1)
#define	GMAC_INTSTAT_DMA_TX_UNDERFLOW	REG_BIT_FIELD(0x020, 15, 1)
#define	GMAC_INTSTAT_RX_INT		REG_BIT_FIELD(0x020, 16, 1)
#define	GMAC_INTSTAT_TX_INT(q)		REG_BIT_FIELD(0x020, 24+(q), 1)
#define	GMAC_INTSTAT_RX_ECC_SOFT	REG_BIT_FIELD(0x020, 28, 1)
#define	GMAC_INTSTAT_RX_ECC_HARD	REG_BIT_FIELD(0x020, 29, 1)
#define	GMAC_INTSTAT_TX_ECC_SOFT	REG_BIT_FIELD(0x020, 30, 1)
#define	GMAC_INTSTAT_TX_ECC_HARD	REG_BIT_FIELD(0x020, 31, 1)

#define	GMAC_INTMASK			REG_BIT_FIELD(0x024, 0, 32)
#define	GMAC_INTMASK_MIB_RX_OVRUN	REG_BIT_FIELD(0x024, 0, 1)
#define	GMAC_INTMASK_MIB_TX_OVRUN	REG_BIT_FIELD(0x024, 1, 1)
#define	GMAC_INTMASK_TX_FLUSH_DONE	REG_BIT_FIELD(0x024, 2, 1)
#define	GMAC_INTMASK_MII_LINK_CHANGE	REG_BIT_FIELD(0x024, 3, 1)
#define	GMAC_INTMASK_MDIO_DONE		REG_BIT_FIELD(0x024, 4, 1)
#define	GMAC_INTMASK_MIB_RX_HALF	REG_BIT_FIELD(0x024, 5, 1)
#define	GMAC_INTMASK_MIB_TX_HALF	REG_BIT_FIELD(0x024, 6, 1)
#define	GMAC_INTMASK_TIMER_INT		REG_BIT_FIELD(0x024, 7, 1)
#define	GMAC_INTMASK_SW_LINK_CHANGE	REG_BIT_FIELD(0x024, 8, 1)
#define	GMAC_INTMASK_DMA_DESC_ERR	REG_BIT_FIELD(0x024, 10, 1)
#define	GMAC_INTMASK_DMA_DATA_ERR	REG_BIT_FIELD(0x024, 11, 1)
#define	GMAC_INTMASK_DMA_PROTO_ERR	REG_BIT_FIELD(0x024, 12, 1)
#define	GMAC_INTMASK_DMA_RX_UNDERFLOW	REG_BIT_FIELD(0x024, 13, 1)
#define	GMAC_INTMASK_DMA_RX_OVERFLOW	REG_BIT_FIELD(0x024, 14, 1)
#define	GMAC_INTMASK_DMA_TX_UNDERFLOW	REG_BIT_FIELD(0x024, 15, 1)
#define	GMAC_INTMASK_RX_INT		REG_BIT_FIELD(0x024, 16, 1)
#define	GMAC_INTMASK_TX_INT(q)		REG_BIT_FIELD(0x024, 24+(q), 1)
#define	GMAC_INTMASK_RX_ECC_SOFT	REG_BIT_FIELD(0x024, 28, 1)
#define	GMAC_INTMASK_RX_ECC_HARD	REG_BIT_FIELD(0x024, 29, 1)
#define	GMAC_INTMASK_TX_ECC_SOFT	REG_BIT_FIELD(0x024, 30, 1)
#define	GMAC_INTMASK_TX_ECC_HARD	REG_BIT_FIELD(0x024, 31, 1)

#define	GMAC_TIMER			REG_BIT_FIELD(0x028, 0, 32)

#define	GMAC_INTRX_LZY_TIMEOUT		REG_BIT_FIELD(0x100,0,24)
#define	GMAC_INTRX_LZY_FRMCNT		REG_BIT_FIELD(0x100,24,8)

#define	GMAC_PHYACC_DATA		REG_BIT_FIELD(0x180, 0, 16)
#define	GMAC_PHYACC_ADDR		REG_BIT_FIELD(0x180, 16, 5)
#define	GMAC_PHYACC_REG			REG_BIT_FIELD(0x180, 24, 5)
#define	GMAC_PHYACC_WRITE		REG_BIT_FIELD(0x180, 29, 1)
#define	GMAC_PHYACC_GO			REG_BIT_FIELD(0x180, 30, 1)

#define	GMAC_PHYCTL_ADDR		REG_BIT_FIELD(0x188, 0, 5)
#define	GMAC_PHYCTL_MDC_CYCLE		REG_BIT_FIELD(0x188, 16, 7)
#define	GMAC_PHYCTL_MDC_TRANS		REG_BIT_FIELD(0x188, 23, 1)

/* GMAC has 4 Tx queues, all bit-fields are defined per <q> number */
#define	GMAC_TXCTL(q)			REG_BIT_FIELD(0x200+((q)<<6),0,32)
#define	GMAC_TXCTL_TX_EN(q)		REG_BIT_FIELD(0x200+((q)<<6),0,1)
#define	GMAC_TXCTL_SUSPEND(q)		REG_BIT_FIELD(0x200+((q)<<6),1,1)
#define	GMAC_TXCTL_DMALOOPBACK(q)	REG_BIT_FIELD(0x200+((q)<<6),2,1)
#define	GMAC_TXCTL_DESC_ALIGN(q)	REG_BIT_FIELD(0x200+((q)<<6),5,1)
#define	GMAC_TXCTL_OUTSTAND_READS(q)	REG_BIT_FIELD(0x200+((q)<<6),6,2)
#define	GMAC_TXCTL_PARITY_DIS(q)	REG_BIT_FIELD(0x200+((q)<<6),11,1)
#define	GMAC_TXCTL_DNA_ACT_INDEX(q)	REG_BIT_FIELD(0x200+((q)<<6),13,1)
#define	GMAC_TXCTL_EXTADDR(q)		REG_BIT_FIELD(0x200+((q)<<6),16,2)
#define	GMAC_TXCTL_BURST_LEN(q)		REG_BIT_FIELD(0x200+((q)<<6),18,3)
#define	GMAC_TXCTL_PFETCH_CTL(q)	REG_BIT_FIELD(0x200+((q)<<6),21,3)
#define	GMAC_TXCTL_PFETCH_TH(q)		REG_BIT_FIELD(0x200+((q)<<6),24,2)

#define	GMAC_TX_PTR(q)			REG_BIT_FIELD(0x204+((q)<<6),0,12)
#define	GMAC_TX_ADDR_LOW(q)		REG_BIT_FIELD(0x208+((q)<<6),0,32)
#define	GMAC_TX_ADDR_HIGH(q)		REG_BIT_FIELD(0x20c+((q)<<6),0,32)
#define	GMAC_TXSTAT_CURR_DESC(q)	REG_BIT_FIELD(0x210+((q)<<6),0,12)
#define	GMAC_TXSTAT_TXSTATE(q)		REG_BIT_FIELD(0x210+((q)<<6),28,4)
#define	GMAC_TXSTAT_ACT_DESC(q)		REG_BIT_FIELD(0x214+((q)<<6),0,12)
#define	GMAC_TXSTAT_TXERR(q)		REG_BIT_FIELD(0x214+((q)<<6),28,4)

#define	GMAC_RXCTL			REG_BIT_FIELD(0x220,0,32)
#define	GMAC_RXCTL_RX_EN		REG_BIT_FIELD(0x220,0,1)
#define	GMAC_RXCTL_RX_OFFSET		REG_BIT_FIELD(0x220,1,7)
#define	GMAC_RXCTL_SEP_HDR_DESC		REG_BIT_FIELD(0x220,9,1)
#define	GMAC_RXCTL_OFLOW_CONT		REG_BIT_FIELD(0x220,10,1)
#define	GMAC_RXCTL_PARITY_DIS		REG_BIT_FIELD(0x220,11,1)
#define	GMAC_RXCTL_WAIT_COMPLETE	REG_BIT_FIELD(0x220,12,1)
#define	GMAC_RXCTL_DMA_ACT_INDEX	REG_BIT_FIELD(0x220,13,1)
#define	GMAC_RXCTL_EXTADDR		REG_BIT_FIELD(0x220,16,2)
#define	GMAC_RXCTL_BURST_LEN		REG_BIT_FIELD(0x220,18,3)
#define	GMAC_RXCTL_PFETCH_CTL		REG_BIT_FIELD(0x220,21,3)
#define	GMAC_RXCTL_PFETCH_TH		REG_BIT_FIELD(0x220,24,2)
#define	GMAC_RX_PTR			REG_BIT_FIELD(0x224,0,12)
#define	GMAC_RX_ADDR_LOW		REG_BIT_FIELD(0x228,0,32)
#define	GMAC_RX_ADDR_HIGH		REG_BIT_FIELD(0x22c,0,32)
#define	GMAC_RXSTAT_CURR_DESC		REG_BIT_FIELD(0x230,0,12)
#define	GMAC_RXSTAT_RXSTATE		REG_BIT_FIELD(0x230,28,4)
#define	GMAC_RXSTAT_ACT_DESC		REG_BIT_FIELD(0x234,0,12)
#define	GMAC_RXSTAT_RXERR		REG_BIT_FIELD(0x234,28,4)

#define	UMAC_CORE_VERSION		REG_BIT_FIELD(0x800,0,32)
#define	UMAC_HD_FC_ENA			REG_BIT_FIELD(0x804,0,1)
#define	UMAC_HD_FC_NKOFF		REG_BIT_FIELD(0x804,1,1)
#define	UMAC_IPG_CONFIG_RX		REG_BIT_FIELD(0x804,2,5)

#define	UMAC_CONFIG			REG_BIT_FIELD(0x808,0,32)
#define	UMAC_CONFIG_TX_EN		REG_BIT_FIELD(0x808,0,1)
#define	UMAC_CONFIG_RX_EN		REG_BIT_FIELD(0x808,1,1)
#define	UMAC_CONFIG_ETH_SPEED		REG_BIT_FIELD(0x808,2,2)
#define	UMAC_CONFIG_PROMISC		REG_BIT_FIELD(0x808,4,1)
#define	UMAC_CONFIG_PAD_EN		REG_BIT_FIELD(0x808,5,1)
#define	UMAC_CONFIG_CRC_FW		REG_BIT_FIELD(0x808,6,1)
#define	UMAC_CONFIG_PAUSE_FW		REG_BIT_FIELD(0x808,7,1)
#define	UMAC_CONFIG_RX_PAUSE_IGN	REG_BIT_FIELD(0x808,8,1)
#define	UMAC_CONFIG_TX_ADDR_INS		REG_BIT_FIELD(0x808,9,1)
#define	UMAC_CONFIG_HD_ENA		REG_BIT_FIELD(0x808,10,1)
#define	UMAC_CONFIG_SW_RESET		REG_BIT_FIELD(0x808,11,1)
#define	UMAC_CONFIG_LCL_LOOP_EN		REG_BIT_FIELD(0x808,15,1)
#define	UMAC_CONFIG_AUTO_EN		REG_BIT_FIELD(0x808,22,1)
#define	UMAC_CONFIG_CNTLFRM_EN		REG_BIT_FIELD(0x808,23,1)
#define	UMAC_CONFIG_LNGTHCHK_DIS	REG_BIT_FIELD(0x808,24,1)
#define	UMAC_CONFIG_RMT_LOOP_EN		REG_BIT_FIELD(0x808,25,1)
#define	UMAC_CONFIG_PREAMB_EN		REG_BIT_FIELD(0x808,27,1)
#define	UMAC_CONFIG_TX_PAUSE_IGN	REG_BIT_FIELD(0x808,28,1)
#define	UMAC_CONFIG_TXRX_AUTO_EN	REG_BIT_FIELD(0x808,29,1)

#define	UMAC_MACADDR_LOW		REG_BIT_FIELD(0x80c,0,32)
#define	UMAC_MACADDR_HIGH		REG_BIT_FIELD(0x810,0,16)
#define	UMAC_FRM_LENGTH			REG_BIT_FIELD(0x814,0,14)
#define	UMAC_PAUSE_QUANT		REG_BIT_FIELD(0x818,0,16)

#define	UMAC_MAC_STAT			REG_BIT_FIELD(0x844,0,32)
#define	UMAC_MAC_SPEED			REG_BIT_FIELD(0x844,0,2)
#define	UMAC_MAC_DUPLEX			REG_BIT_FIELD(0x844,2,1)
#define	UMAC_MAC_RX_PAUSE		REG_BIT_FIELD(0x844,3,1)
#define	UMAC_MAC_TX_PAUSE		REG_BIT_FIELD(0x844,4,1)
#define	UMAC_MAC_LINK			REG_BIT_FIELD(0x844,5,1)

#define	UMAC_FRM_TAG0			REG_BIT_FIELD(0x848,0,16)
#define	UMAC_FRM_TAG1			REG_BIT_FIELD(0x84c,0,16)
#define	UMAC_IPG_CONFIG_TX		REG_BIT_FIELD(0x85c,0,5)

#define	UMAC_PAUSE_TIMER		REG_BIT_FIELD(0xb30,0,17)
#define	UMAC_PAUSE_CONTROL_EN		REG_BIT_FIELD(0xb30,17,1)
#define	UMAC_TXFIFO_FLUSH		REG_BIT_FIELD(0xb34,0,1)
#define	UMAC_RXFIFO_STAT		REG_BIT_FIELD(0xb38,0,2)

#define	GMAC_MIB_COUNTERS_OFFSET	0x300

/* GMAC MIB counters structure; field names match rtnl_link_stats64 where they exist */
struct gmac_mib_counters {
	/* TX stats */
	u64	tx_bytes;		/* 0x300-0x304 */
	u32	tx_packets;		/* 0x308 */
	u64	tx_all_bytes;		/* 0x30c-0x310 */
	u32	tx_all_packets;		/* 0x314 */
	u32	tx_bcast_packets;	/* 0x318 */
	u32	tx_mcast_packets;	/* 0x31c */
	u32	tx_64b_packets;		/* 0x320 */
	u32	tx_127b_packets;	/* 0x324 */
	u32	tx_255b_packets;	/* 0x328 */
	u32	tx_511b_packets;	/* 0x32c */
	u32	tx_1kb_packets;		/* 0x330 */
	u32	tx_1_5kb_packets;	/* 0x334 */
	u32	tx_2kb_packets;		/* 0x338 */
	u32	tx_4kb_packets;		/* 0x33c */
	u32	tx_8kb_packets;		/* 0x340 */
	u32	tx_max_packets;		/* 0x344 */
	u32	tx_jabbers;		/* 0x348 */
	u32	tx_oversize;		/* 0x34c */
	u32	tx_runt;		/* 0x350 */
	u32	tx_fifo_errors;		/* 0x354 */
	u32	collisions;		/* 0x358 */
	u32	tx_1_col_packets;	/* 0x35c */
	u32	tx_m_col_packets;	/* 0x360 */
	u32	tx_aborted_errors;	/* 0x364 */
	u32	tx_window_errors;	/* 0x368 */
	u32	tx_deferred_packets;	/* 0x36c */
	u32	tx_carrier_errors;	/* 0x370 */
	u32	tx_pause_packets;	/* 0x374 */
	u32	tx_unicast_packets;	/* 0x378 */
	u32	tx_qos_0_packets;	/* 0x37c */
	u64	tx_qos_0_bytes;		/* 0x380-0x384 */
	u32	tx_qos_1_packets;	/* 0x388 */
	u64	tx_qos_1_bytes;		/* 0x38c-0x390 */
	u32	tx_qos_2_packets;	/* 0x394 */
	u64	tx_qos_2_bytes;		/* 0x398-0x39c */
	u32	tx_qos_3_packets;	/* 0x3a0 */
	u64	tx_qos_3_bytes;		/* 0x3a4-0x3a8 */
	u32	reserved[1];		/* 0x3ac */

	/* RX stats */
	u64	rx_bytes;		/* 0x3b0-0x3b4 */
	u32	rx_packets;		/* 0x3b8 */
	u64	rx_all_bytes;		/* 0x3bc-0x3c0 */
	u32	rx_all_packets;		/* 0x3c4 */
	u32	rx_bcast_packets;	/* 0x3c8 */
	u32	multicast;		/* 0x3cc */
	u32	rx_64b_packets;		/* 0x3d0 */
	u32	rx_127b_packets;	/* 0x3d4 */
	u32	rx_255b_packets;	/* 0x3d8 */
	u32	rx_511b_packets;	/* 0x3dc */
	u32	rx_1kb_packets;		/* 0x3e0 */
	u32	rx_1_5kb_packets;	/* 0x3e4 */
	u32	rx_2kb_packets;		/* 0x3e8 */
	u32	rx_4kb_packets;		/* 0x3ec */
	u32	rx_8kb_packets;		/* 0x3f0 */
	u32	rx_max_packets;		/* 0x3f4 */
	u32	rx_jabbers;		/* 0x3f8 */
	u32	rx_length_errors;	/* 0x3fc */
	u32	rx_runt_bad_packets;	/* 0x400 */
	u32	rx_over_errors;		/* 0x404 */
	u32	rx_crc_or_align_errors;	/* 0x408 */
	u32	rx_runt_good_packets;	/* 0x40c */
	u32	rx_crc_errors;		/* 0x410 */
	u32	rx_frame_errors;	/* 0x414 */
	u32	rx_missed_errors;	/* 0x418 */
	u32	rx_pause_packets;	/* 0x41c */
	u32	rx_control_packets;	/* 0x420 */
	u32	rx_src_mac_changes;	/* 0x424 */
	u32	rx_unicast_packets;	/* 0x428 */
} __attribute__((packed));

/*
 * Register bit-field manipulation routines
 * NOTE: These compile to just a few machine instructions in-line
 */

typedef struct { unsigned reg, pos, width; } reg_bit_field_t;

static inline unsigned _reg_read( struct net_device *dev,  reg_bit_field_t rbf )
{
	void __iomem * base = (void __iomem *) dev->base_addr;
	unsigned val ;

	val =  __raw_readl( base + rbf.reg);
	val >>= rbf.pos;
	/* Guard the shift: <<32 on a 32-bit type is undefined,
	 * and full-width (32-bit) fields are defined above */
	if( rbf.width < 32 )
		val &= (1u << rbf.width)-1;

	return val;
}

static inline void _reg_write( struct net_device *dev,
	reg_bit_field_t rbf, unsigned newval )
{
	void __iomem * base = (void __iomem *) dev->base_addr;
	unsigned val, msk ;

	/* Guard the shift: <<32 on a 32-bit type is undefined,
	 * and full-width (32-bit) fields are defined above */
	msk = (rbf.width < 32) ? (1u << rbf.width)-1 : ~0u;
	msk <<= rbf.pos;
	newval <<= rbf.pos;
	newval &= msk ;

	val =  __raw_readl( base + rbf.reg);
	val &= ~msk ;
	val |= newval ;
	__raw_writel( val, base + rbf.reg);
}

/*
 * BUFFER DESCRIPTORS
 */
#define	GMAC_DESC_SIZE_SHIFT	4	/* Descriptors are 16 bytes in size */

/*
  This layout is correct only on little-endian builds.
  Descriptors are defined with bit-fields, for best optimization,
  assuming LSBs get allocated first.
 */
typedef struct {
	unsigned
		desc_rsrvd_0	: 20,	/* Reserved, must be 0 */
		desc_flags	: 8,	/* Flags, i.e. CRC generation mode */
		desc_eot	: 1,	/* End-of-Table indicator */
		desc_int_comp	: 1,	/* Interrupt on completion */
		desc_eof	: 1,	/* End-of-Frame */
		desc_sof	: 1,	/* Start-of-Frame */
		desc_buf_sz	: 14,	/* Data buffer length (bytes) */
		desc_resrvd_1	: 1,	/* Reserved, must be 0 */
		desc_addr_ext	: 2,	/* AddrExt, not used, must be 0 */
		desc_parity	: 1,	/* Parity bit for even desc parity */
		desc_resrvd_2	: 12;	/* Reserved, must be 0 */
	u64	desc_data_ptr	: 64;	/* Data buffer 64-bit pointer */
} gmac_desc_t;
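
/*
 * Resulting 16-byte layout (illustrative, assuming the little-endian
 * bit-field allocation above): word 0 carries the flags plus the
 * EOT/IntComp/EOF/SOF bits in its upper byte, word 1 carries the
 * 14-bit buffer size together with AddrExt and the parity bit, and
 * words 2-3 hold the 64-bit data pointer.
 */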

typedef	struct {
	unsigned
		rxstat_framelen	: 16,	/* Actual received byte count */
		rxstat_type	: 2,	/* Type: 0:uni, 1:multi, 2:broadcast */
		rxstat_vlan	: 1,	/* VLAN Tag detected */
		rxstat_crc_err	: 1,	/* CRC error */
		rxstat_oversize	: 1,	/* Oversize, truncated packet */
		rxstat_ctf	: 1,	/* Processed by CTF */
		rxstat_ctf_err	: 1,	/* Error in CTF processing */
		rxstat_oflow	: 1,	/* Overflow in packet reception */
		rxstat_desc_cnt	: 4,	/* Number of descriptors - 1 */
		rxstat_datatype	: 4;	/* Data type, not used */
} gmac_rxstat_t;

#define	GMAC_RX_DESC_COUNT	256	/* Number of Rx Descriptors */
#define	GMAC_TX_DESC_COUNT	256	/* Number of Tx Descriptors */

/*
 * PRIVATE DEVICE DATA STRUCTURE
 */

struct amac_ring {
	unsigned	count,		/* Total # of elements */
			ix_in,		/* Producer's index */
			ix_out,		/* Consumer index */
			spare;
};

struct amac_priv {
	spinlock_t		rx_lock;
	struct net_device_stats	counters;
	spinlock_t		tx_lock;
	struct amac_ring	rx_ring, tx_ring;
	dma_addr_t		rx_desc_paddr, tx_desc_paddr ;
	void *			tx_desc_start;
	void *			rx_desc_start;
	struct sk_buff	**	rx_skb,
			**	tx_skb;
	struct napi_struct	rx_napi, tx_napi;
	unsigned		rx_coal_usec, rx_coal_frames;
	unsigned		tx_coal_usec, tx_coal_frames;
	bool			tx_active;
	u8			unit;
} ;

/*
 * Forward declarations
 */
static int amac_tx_fini( struct net_device * dev, unsigned q, unsigned quota );
static int amac_rx_fill( struct net_device * dev, int quant );
static irqreturn_t  amac_interrupt( int irq,  void * _dev );

/*
 * Ring manipulation routines
 */
static inline int _ring_put( struct amac_ring * r )
{
	int ix, ret;

	ix = r->ix_in + 1;
	if( ix >= r->count )
		ix = 0;
	if( ix == r->ix_out )
		return -1;
	ret = r->ix_in;
	r->ix_in = ix;
	return ret;
}

static inline int _ring_get( struct amac_ring * r, unsigned stop_ix )
{
	int ix;

	if( r->ix_in == r->ix_out )
		return -1;
	if( r->ix_out == stop_ix )
		return -2;
	ix = r->ix_out ;
	r->ix_out = ix + 1;
	if( r->ix_out >= r->count )
		r->ix_out = 0;
	return ix;
}

static inline int _ring_room( struct amac_ring * r )
{
	int i;

	i = r->ix_out;
	if( i <= r->ix_in )
		i += r->count;
	i = i - r->ix_in - 1 ;

	/* Zero room is legitimate (ring full, see amac_start_xmit) */
	BUG_ON(i > r->count || i < 0 );
	return i;
}

static inline int _ring_members( struct amac_ring * r )
{
	int i;

	if( r->ix_in >= r->ix_out )
		i = r->ix_in - r->ix_out ;
	else
		i = r->count + r->ix_in - r->ix_out ;

	BUG_ON(i >= r->count || i < 0 );
	return i;
}
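
/*
 * Worked example (illustrative): with count=4, ix_in=3, ix_out=1 the
 * ring holds entries 1 and 2, so _ring_members() returns 2 and
 * _ring_room() returns 1.  One slot is always kept unused so that
 * ix_in == ix_out unambiguously means "empty".
 */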

static void amac_desc_parity( gmac_desc_t *d )
{
	u32 r, * a = (void *) d;

	r = a[0] ^ a[1] ^ a[2] ^ a[3];
	r = 0xFFFF & ( r ^ (r>>16) );
	r = 0xFF & ( r ^ (r>>8) );
	r = 0xF & ( r ^ (r>>4) );
	r = 0x3 & ( r ^ (r>>2) );
	r = 0x1 & ( r ^ (r>>1) );
	d->desc_parity ^= r ;
}
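
/*
 * The XOR ladder above folds the whole 128-bit descriptor down to one
 * bit: <r> ends up as the XOR of all 128 bits, so flipping desc_parity
 * by <r> leaves the descriptor with even parity, which is what the
 * parity-checking DMA engine expects (see GMAC_TXCTL_PARITY_DIS /
 * GMAC_RXCTL_PARITY_DIS above).
 */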

static void amac_tx_show( struct net_device * dev )
{
	struct amac_priv * priv = netdev_priv(dev);
	unsigned q = 0;

	printk("%s: Tx ring in %#x out %#x "
		"Curr %#x Act %#x Last %#x State %#x Err %#x\n",
		dev->name,
		priv->tx_ring.ix_in,
		priv->tx_ring.ix_out,
		_reg_read(dev, GMAC_TXSTAT_CURR_DESC(q)),
		_reg_read(dev, GMAC_TXSTAT_ACT_DESC(q)),
		_reg_read(dev, GMAC_TX_PTR(q)),
		_reg_read(dev, GMAC_TXSTAT_TXSTATE(q)),
		_reg_read(dev, GMAC_TXSTAT_TXERR(q))
		);
}

/*
 * Network device method implementation
 */
static void amac_change_rx_flags(struct net_device *dev, int flags)
{
	if( dev->flags & IFF_PROMISC )
		_reg_write( dev, UMAC_CONFIG_PROMISC, 1);
	else
		_reg_write( dev, UMAC_CONFIG_PROMISC, 0);
	/* No MC-filtering in hardware, ignore IFF_ALLMULTI */
}

static void amac_set_hw_addr(struct net_device *dev)
{
	u32 hw_addr[2];

	hw_addr[0] =	dev->dev_addr[ 3 ] |
			dev->dev_addr[ 2 ] << 8 |
			dev->dev_addr[ 1 ] << 16 |
			dev->dev_addr[ 0 ] << 24;
	hw_addr[1] =	dev->dev_addr[ 5 ] |
			dev->dev_addr[ 4 ] << 8;

	_reg_write( dev, UMAC_MACADDR_LOW, hw_addr[0] );
	_reg_write( dev, UMAC_MACADDR_HIGH, hw_addr[1] );
}

/* No multicast filtering in hardware; empty stub satisfies the
 * ndo_set_multicast_list method */
static void amac_set_mc_list(struct net_device *dev)
{
}

static struct rtnl_link_stats64 * amac_get_stats64(
		struct net_device *dev,
		struct rtnl_link_stats64 * dest)
{
	/* Start with just the offset, so taking a member's address below
	 * yields its register offset for the layout check */
	struct gmac_mib_counters * mib_regs = (void *) GMAC_MIB_COUNTERS_OFFSET;

	/* Validate MIB counter structure definition accuracy */
	WARN_ON( (u32)&mib_regs->rx_unicast_packets != (0x428) );

	/* Get tx_dropped count from <txq> */
	dev_txq_stats_fold( dev, dest );

	/* Linux caused rx drops, we have to count these in the driver */
	dest->rx_dropped 	= dev->stats.rx_dropped ;

	/* there is no register to count these */
	dest->rx_fifo_errors	= dev->stats.rx_fifo_errors ;

	mib_regs = (void *)(dev->base_addr + GMAC_MIB_COUNTERS_OFFSET);

	/* Get the appropriate MIB counters */
	dest->rx_packets	= mib_regs->rx_packets ;
	dest->tx_packets	= mib_regs->tx_packets ;

	dest->multicast 	= mib_regs->multicast ;
	dest->collisions 	= mib_regs->collisions ;

	dest->rx_length_errors	= mib_regs->rx_length_errors ;
	dest->rx_over_errors	= mib_regs->rx_over_errors ;
	dest->rx_crc_errors	= mib_regs->rx_crc_errors ;
	dest->rx_frame_errors	= mib_regs->rx_frame_errors ;
	dest->rx_missed_errors	= mib_regs->rx_missed_errors ;

	dest->tx_aborted_errors	= mib_regs->tx_aborted_errors ;
	dest->tx_fifo_errors	= mib_regs->tx_fifo_errors ;
	dest->tx_window_errors	= mib_regs->tx_window_errors ;
	dest->tx_heartbeat_errors	= mib_regs->tx_m_col_packets ;


	/* These are cumulative error counts for all types of errors */
	dest->rx_errors 	=
		dest->rx_length_errors	+
		dest->rx_over_errors	+
		dest->rx_crc_errors	+
		dest->rx_frame_errors	+
		dest->rx_fifo_errors	+
		dest->rx_missed_errors	;
	dest->tx_errors 	=
		dest->tx_aborted_errors	+
		dest->tx_carrier_errors	+
		dest->tx_fifo_errors	+
		dest->tx_window_errors	+
		dest->tx_heartbeat_errors;

	/* These are 64-bit MIB counters */
	dest->rx_bytes		= mib_regs->rx_bytes ;
	dest->tx_bytes		= mib_regs->tx_bytes ;

	return dest ;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void amac_poll_controller(struct net_device *dev)
{
	u32 int_msk ;

	/* Disable device interrupts */
	int_msk = _reg_read( dev, GMAC_INTMASK );
	_reg_write( dev, GMAC_INTMASK, 0 );

	/* Call the interrupt service routine */
	amac_interrupt( dev->irq, dev );

	/* Re-enable interrupts */
	_reg_write( dev, GMAC_INTMASK, int_msk );
}
#endif

static int amac_rx_pkt( struct net_device *dev, struct sk_buff * skb)
{
	struct amac_priv * priv = netdev_priv(dev);
	gmac_rxstat_t rxstat ;
	unsigned rx_len;

	/* Mark ownership */
	skb->dev = dev ;

	/* Fetch <rxstat> from start of data buffer */
	memcpy( &rxstat, skb->data, sizeof(rxstat) );

	/* Adjust valid packet length in <skb> */
	rx_len = rxstat.rxstat_framelen;
	skb_put(skb, rx_len+sizeof(rxstat) );
	skb_pull( skb, sizeof(rxstat) );

	/* If bad packet, count errors, otherwise ingest it */
	if( rxstat.rxstat_crc_err ) {
		priv->counters.rx_crc_errors ++ ;
		goto _pkt_err;
	} else if( rxstat.rxstat_oversize ) {
		priv->counters.rx_over_errors ++ ;
		goto _pkt_err;
	} else if( rxstat.rxstat_oflow ) {
		priv->counters.rx_fifo_errors ++ ;
		goto _pkt_err;
	}

	/* Must be good packet, handle it */
	skb->protocol = eth_type_trans( skb, dev );

	priv->counters.rx_packets ++;
	priv->counters.rx_bytes += skb->len;
	return( netif_receive_skb( skb ) );

_pkt_err:
	priv->counters.rx_errors ++ ;
	dev_kfree_skb_any( skb );
	return NET_RX_SUCCESS ;
}
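
/*
 * Note on the skb_put()/skb_pull() pair above: the hardware prepends a
 * 4-byte gmac_rxstat_t to every received frame because amac_open()
 * programs GMAC_RXCTL_RX_OFFSET to sizeof(gmac_rxstat_t), so the
 * status word must be stripped before the packet enters the stack.
 */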

static int amac_rx_do( struct net_device *dev, int quota, bool * done )
{
	struct amac_priv * priv = netdev_priv(dev);
	struct sk_buff * skb ;
	gmac_desc_t * rx_desc ;
	unsigned curr_rx_ix, npackets = 0 ;
	int ix, res;

	* done = false ;

	/* Get currently used Rx descriptor index */
	curr_rx_ix = _reg_read( dev,  GMAC_RXSTAT_CURR_DESC );
	curr_rx_ix >>= GMAC_DESC_SIZE_SHIFT ;
	BUG_ON( curr_rx_ix >= priv->rx_ring.count );

	while( npackets < quota) {
		if( ! spin_trylock( & priv->rx_lock ) ) {
#ifdef	_EXTRA_DEBUG
			printk("%s: lock busy\n", __FUNCTION__ );
#endif
			return npackets ;
			}
		ix = _ring_get( &priv->rx_ring, curr_rx_ix );
		spin_unlock( & priv->rx_lock );

		if( ix < 0 ) {
			* done = true ;
			break;
			}

#ifdef	_EXTRA_DEBUG
		printk("%s: ix=%#x\n", __FUNCTION__, ix );
#endif

		/* Process the descriptor */
		rx_desc = priv->rx_desc_start + (ix<<GMAC_DESC_SIZE_SHIFT);

		/* Unmap buffer from DMA */
		dma_unmap_single( &dev->dev, rx_desc->desc_data_ptr,
			rx_desc->desc_buf_sz, DMA_FROM_DEVICE );

		/* Extract <skb> from descriptor */
		skb = priv->rx_skb[ ix ];

		/* Clear descriptor */
		rx_desc->desc_data_ptr = 0;
		rx_desc->desc_buf_sz = 0;
		priv->rx_skb[ ix ] = NULL;

		/* Process incoming packet */
		res = amac_rx_pkt( dev, skb );

		/* Stop if kernel is congested */
		if( res == NET_RX_DROP ) {
			dev->stats.rx_dropped ++;
			break;
			}

		/* Count processed packets */
		npackets ++ ;
	}
	return npackets ;
}

static int amac_rx_fill( struct net_device * dev, int quant )
{
	struct amac_priv * priv = netdev_priv(dev);
	struct sk_buff * skb ;
	gmac_desc_t * rx_desc ;
	unsigned room, size, off, count = 0;
	unsigned saved_ix_in;
	int ix, last_ix= -1 ;
	dma_addr_t paddr;

	/* All Rx buffers over 2K for now */
	size = AMAC_MAX_PACKET + sizeof(gmac_rxstat_t) + L1_CACHE_BYTES ;

	BUG_ON( size <= dev->mtu + sizeof(gmac_rxstat_t));

	do {
		if( ! spin_trylock( &priv->rx_lock )) {
#ifdef	_EXTRA_DEBUG
			printk("%s: lock busy\n", __FUNCTION__ );
#endif
			break;
		}
		saved_ix_in = priv->rx_ring.ix_in;
		ix = _ring_put( &priv->rx_ring );
		spin_unlock( &priv->rx_lock );

		if( ix < 0 )
			break;

#ifdef	_EXTRA_DEBUG
		printk("%s: ix=%#x\n", __FUNCTION__, ix );
#endif

		/* Bail if slot is not empty (should not happen) */
		if( unlikely(priv->rx_skb[ ix ]) )
			continue;

		/* Fill a buffer into empty descriptor */
		rx_desc = priv->rx_desc_start + (ix<<GMAC_DESC_SIZE_SHIFT);

		/* Allocate new buffer */
		skb = dev_alloc_skb( size );
		if( IS_ERR_OR_NULL(skb) )
			break;

		/* Mark ownership */
		skb->dev = dev ;

		/* Save <skb> pointer */
		priv->rx_skb[ ix ] = skb;

		/* Update descriptor with new buffer */
		BUG_ON( rx_desc->desc_data_ptr );

		room = skb_tailroom( skb );

		paddr = dma_map_single( &dev->dev, skb->data, room,
				 DMA_FROM_DEVICE );

		if( dma_mapping_error( &dev->dev, paddr )) {
			printk_ratelimited(KERN_WARNING
				"%s: failed to map Rx buffer\n", dev->name);
			priv->rx_skb[ ix ] = NULL;
			priv->rx_ring.ix_in = saved_ix_in ;
			dev_kfree_skb_any( skb );
			break;
		}

		rx_desc->desc_parity = 0;
		rx_desc->desc_data_ptr =  paddr ;
		rx_desc->desc_buf_sz = room ;
		rx_desc->desc_sof =
		rx_desc->desc_eof = 1;	/* One packet per desc */

		/* Mark EoT for last desc of the Rx ring */
		if( (ix+1) == priv->rx_ring.count )
			rx_desc->desc_eot = 1;

		/* calculate and set descriptor parity */
		amac_desc_parity( rx_desc );

		count ++ ;
		last_ix = ix;
		} while( --quant > 0 );

	barrier();

	/* Tell DMA where the last valid descriptor is */
	if( last_ix >= 0 ) {
		if( (++ last_ix) >= priv->rx_ring.count )
			last_ix = 0;
		off = last_ix << GMAC_DESC_SIZE_SHIFT ;
		_reg_write( dev, GMAC_RX_PTR, priv->rx_desc_paddr + off);
	}
	return count ;
}

/*
 * NAPI polling functions
 */
static int amac_rx_poll( struct napi_struct *napi, int budget )
{
	struct net_device * dev = napi->dev;
	struct amac_priv * priv = netdev_priv(dev);
	unsigned npackets, nbuffs, i ;
	bool rx_done ;

	npackets = amac_rx_do( dev, budget, & rx_done );

	/* out of budget, come back for more */
	if( npackets >= budget )
		return npackets ;

	/* If too few Rx Buffers, fill up more */
	nbuffs = _ring_members( &priv->rx_ring );

	/* Keep Rx buffs between <budget> and 2*<budget> */
	i = min( (budget << 1) - nbuffs, budget - npackets - 1);
	npackets += amac_rx_fill( dev, i );

	/* Must not call napi_complete() if used up all budget */
	if( npackets >= budget )
		return npackets ;

	if( rx_done ) {
		/* Done for now, re-enable interrupts */
		napi_complete(napi);
		_reg_write( dev, GMAC_INTMASK_RX_INT, 1);
	}

	return npackets ;
}
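
/*
 * The fill quota above keeps between <budget> and 2*<budget> buffers
 * posted: (budget << 1) - nbuffs tops the ring up toward twice the
 * budget, while budget - npackets - 1 ensures the refill work itself
 * cannot push this poll cycle over its NAPI budget.
 */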

static int amac_tx_poll( struct napi_struct *napi, int budget )
{
	struct net_device * dev = napi->dev;
	struct amac_priv * priv = netdev_priv(dev);
	unsigned count ;
	const unsigned q = 0 ;

	count = amac_tx_fini( dev, q, budget );

	if( count >= budget )
		return count ;

	if( ! priv->tx_active ) {
		napi_complete(napi);
		_reg_write( dev, GMAC_INTMASK_TX_INT(q), 1);
		}

	return count ;
}

static irqreturn_t  amac_interrupt( int irq,  void * _dev )
{
	struct net_device * dev = _dev ;
	struct amac_priv * priv = netdev_priv(dev);
	const unsigned q = 0;
	u32 reg ;

	reg = _reg_read( dev, GMAC_INTSTAT);

#ifdef	_EXTRA_DEBUG
	if( reg ) {
		char msg[32] = "";
		if( _reg_read( dev, GMAC_INTSTAT_RX_INT) )
			strcat( msg, "Rx ");
		if( _reg_read( dev, GMAC_INTSTAT_TX_INT(q)))
			strcat( msg, "Tx ");
		if( _reg_read( dev, GMAC_INTSTAT_TIMER_INT))
			strcat( msg, "W ");
		printk("%s: %s\n", __FUNCTION__, msg );
	}
#endif

	if( reg == 0 )
		return IRQ_NONE;

	/* Decode interrupt causes */
	if( _reg_read( dev, GMAC_INTSTAT_RX_INT) ) {
		/* Disable Rx interrupt */
		_reg_write( dev, GMAC_INTMASK_RX_INT, 0);
		_reg_write( dev, GMAC_INTSTAT_RX_INT, 1);
		/* Schedule Rx processing */
		napi_schedule( &priv->rx_napi );
		}
	if( _reg_read( dev, GMAC_INTSTAT_TX_INT(q)) ||
	    _reg_read( dev, GMAC_INTSTAT_TIMER_INT)) {

		/* Shut-off Tx/Timer interrupts */
		_reg_write( dev, GMAC_TIMER, 0 );
		_reg_write( dev, GMAC_INTMASK_TIMER_INT, 0);
		_reg_write( dev, GMAC_INTSTAT_TIMER_INT, 1);
		_reg_write( dev, GMAC_INTMASK_TX_INT(q), 0);
		_reg_write( dev, GMAC_INTSTAT_TX_INT(q), 1);

		/* Schedule cleanup of Tx buffers */
		napi_schedule( &priv->tx_napi );
		}
	if( _reg_read( dev, GMAC_INTSTAT_MII_LINK_CHANGE)) {
		printk_ratelimited( "%s: MII Link Change\n", dev->name );
		_reg_write( dev, GMAC_INTSTAT_MII_LINK_CHANGE, 1);
		}
	if( _reg_read( dev, GMAC_INTSTAT_SW_LINK_CHANGE)) {
		printk_ratelimited( "%s: Switch Link Change\n", dev->name );
		_reg_write( dev, GMAC_INTSTAT_SW_LINK_CHANGE, 1);
		goto _dma_error;
		}
	if( _reg_read( dev, GMAC_INTSTAT_DMA_DESC_ERR)) {
		printk_ratelimited( "%s: DMA Descriptor Error\n", dev->name );
		napi_schedule( &priv->tx_napi );
		goto _dma_error;
		}
	if( _reg_read( dev, GMAC_INTSTAT_DMA_DATA_ERR)) {
		printk_ratelimited( "%s: DMA Data Error\n", dev->name );
		_reg_write( dev, GMAC_INTSTAT_DMA_DATA_ERR, 1);
		goto _dma_error;
		}
	if( _reg_read( dev, GMAC_INTSTAT_DMA_PROTO_ERR)) {
		printk_ratelimited( "%s: DMA Protocol Error\n", dev->name );
		_reg_write( dev, GMAC_INTSTAT_DMA_PROTO_ERR, 1);
		goto _dma_error;
		}
	if( _reg_read( dev, GMAC_INTSTAT_DMA_RX_UNDERFLOW)) {
		printk_ratelimited( "%s: DMA Rx Underflow\n", dev->name );
		dev->stats.rx_fifo_errors ++ ;
		_reg_write( dev, GMAC_INTSTAT_DMA_RX_UNDERFLOW, 1);
		goto _dma_error;
		}
	if( _reg_read( dev, GMAC_INTSTAT_DMA_RX_OVERFLOW)) {
		printk_ratelimited( "%s: DMA Rx Overflow\n", dev->name );
		dev->stats.rx_fifo_errors ++ ;
		_reg_write( dev, GMAC_INTSTAT_DMA_RX_OVERFLOW, 1);
		goto _dma_error;
		}
	if( _reg_read( dev, GMAC_INTSTAT_DMA_TX_UNDERFLOW)) {
		printk_ratelimited( "%s: DMA Tx Underflow\n", dev->name );
		_reg_write( dev, GMAC_INTSTAT_DMA_TX_UNDERFLOW, 1);
		goto _dma_error;
		}

	return IRQ_HANDLED;

_dma_error:
	napi_schedule( &priv->tx_napi );
	return IRQ_HANDLED;
}


static u16 amac_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	/* Don't know how to do this yet */
	return 0;
}

static int amac_tx_fini( struct net_device * dev, unsigned q, unsigned quota )
{
	struct amac_priv * priv = netdev_priv(dev);
	gmac_desc_t * tx_desc ;
	struct sk_buff * skb ;
	unsigned curr_tx_ix, count = 0 ;
	int ix;

	/* Get currently used Tx descriptor index */
	curr_tx_ix = _reg_read( dev,  GMAC_TXSTAT_CURR_DESC(q) );
	curr_tx_ix >>= GMAC_DESC_SIZE_SHIFT ;
	BUG_ON( curr_tx_ix >= priv->tx_ring.count );

	while( count < quota ) {
		if( ! spin_trylock( &priv->tx_lock )) {
#ifdef	_EXTRA_DEBUG
			printk("%s: lock busy\n", __FUNCTION__);
#endif
			break;
		}
		ix = _ring_get( &priv->tx_ring, curr_tx_ix);
		spin_unlock( &priv->tx_lock );

		if( ix < 0 ) {
			priv->tx_active = false ;
			break;
			}

#ifdef	_EXTRA_DEBUG
		printk("%s: ix=%#x curr_ix=%#x\n", __FUNCTION__, ix, curr_tx_ix );
#endif

		if( unlikely( priv->tx_skb[ix] == NULL)) {
			priv->tx_active = false ;
			break;
			}

		tx_desc = priv->tx_desc_start + (ix<<GMAC_DESC_SIZE_SHIFT);

		/* Unmap <skb> from DMA */
		dma_unmap_single( &dev->dev, tx_desc->desc_data_ptr,
			tx_desc->desc_buf_sz, DMA_TO_DEVICE );

		/* get <skb> to release */
		skb = priv->tx_skb[ ix ];
		priv->tx_skb[ ix ] = NULL;

		/* Mark descriptor free */
		memset( tx_desc, 0, sizeof( * tx_desc ));

		/* Release <skb> */
		dev_kfree_skb_any( skb );

		count ++ ;
	}

	/* Resume stalled transmission */
	if( count && netif_queue_stopped( dev ))
		netif_wake_queue( dev );

	return count ;
}

static netdev_tx_t amac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct amac_priv * priv = netdev_priv(dev);
	dma_addr_t paddr;
	gmac_desc_t *tx_desc, desc ;
	u16 len;
	int ix, room;
	unsigned off;
	unsigned const q=0;

	BUG_ON( skb_shinfo(skb)->nr_frags != 0 );/* S/G not implemented yet */

	/* tx_lock already taken at device level */
	ix = _ring_put( &priv->tx_ring );

#ifdef	_EXTRA_DEBUG
	printk("%s: ix=%#x\n", __FUNCTION__, ix );
#endif

	if( ix < 0 )
		return NETDEV_TX_BUSY;

	if( priv->tx_skb[ ix ] != NULL ) {
		/* Undo the _ring_put() so the slot is not leaked */
		priv->tx_ring.ix_in = ix ;
		return NETDEV_TX_BUSY;
	}

	len = skb->len ;

	/* Map <skb> into Tx descriptor */
	paddr = dma_map_single( &dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if( dma_mapping_error( &dev->dev, paddr )) {
		printk(KERN_WARNING "%s: Tx DMA map failed\n", dev->name);
		/* Undo the _ring_put() so the slot is not leaked */
		priv->tx_ring.ix_in = ix ;
		return NETDEV_TX_BUSY ;
	}

	/* Save <skb> pointer */
	priv->tx_skb[ ix ] = skb ;

	/* Fill in the Tx Descriptor */
	tx_desc = priv->tx_desc_start + (ix << GMAC_DESC_SIZE_SHIFT) ;

	/* Prep descriptor */
	memset( &desc, 0, sizeof(desc));

	desc.desc_flags = 0x0;	/* Append CRC */
	desc.desc_parity = 0;
	desc.desc_buf_sz = len;
	desc.desc_data_ptr = paddr;

	desc.desc_eof = 1;		/* One packet per desc (for now) */
	desc.desc_sof = 1;
	desc.desc_int_comp = 0;

	/* Mark EoT for last desc */
	if( (ix+1) == priv->tx_ring.count )
		desc.desc_eot = 1;

	/* Interrupt once for every 64 packets transmitted */
	if( (ix & 0x3f) == 0x3f && ! priv->tx_active ) {
		desc.desc_int_comp = 1;
		priv->tx_active = true;
	}

	/* calculate and set descriptor parity */
	amac_desc_parity( &desc );

	/* Get current LastDesc */
	off = _reg_read( dev, GMAC_TX_PTR(q) );
	off >>= GMAC_DESC_SIZE_SHIFT ;
	BUG_ON( off >= priv->tx_ring.count );

	/* Need to suspend Tx DMA if writing into LastDesc */
	if( off == ix )
		_reg_write( dev, GMAC_TXCTL_SUSPEND(q), 1);

	/* Write descriptor at once to memory; be sure it is in memory
	 * before the device reads it */
	*tx_desc = desc ;
	barrier();

	dev->trans_start = jiffies ;

	/* Stop transmission if ran out of descriptors */
	room = _ring_room( &priv->tx_ring) ;

	if( room <= 0 )
		netif_stop_queue( dev );

	/* Update stats */
	priv->counters.tx_packets ++ ;
	priv->counters.tx_bytes += len;

	/* Kick the hardware */
	if( off == ix )
		_reg_write( dev, GMAC_TXCTL_SUSPEND(q), 0);

	off = (priv->tx_ring.ix_in) << GMAC_DESC_SIZE_SHIFT ;
	_reg_write( dev, GMAC_TX_PTR(q), priv->tx_desc_paddr + off);

	/* Reset timer */
	if( ! priv->tx_active ) {
		_reg_write( dev, GMAC_TIMER, 500000000 );
		_reg_write( dev, GMAC_INTSTAT_TIMER_INT, 1);
		_reg_write( dev, GMAC_INTMASK_TIMER_INT, 1);
		priv->tx_active = true ;
		}

	return NETDEV_TX_OK;
}
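
/*
 * Design note: the GMAC_TXCTL_SUSPEND() pair above closes a race when
 * the new descriptor lands exactly in the slot the DMA engine reports
 * as LastDesc; transmission is suspended around the rewrite and
 * resumed only after GMAC_TX_PTR has been advanced past it.
 */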

static void amac_tx_timeout(struct net_device *dev)
{
	struct amac_priv * priv = netdev_priv(dev);

	printk(KERN_WARNING "%s: Tx timeout\n", dev->name );

	napi_schedule( &priv->tx_napi );
}


static int amac_open(struct net_device *dev)
{
	struct amac_priv * priv = netdev_priv(dev);
	gmac_desc_t * desc ;
	const unsigned q = 0;
	int res = 0;

	/* Setup interrupt service routine */
	res = request_irq( dev->irq, amac_interrupt, 0, dev->name, dev );

	if( res != 0 ) goto _fail_irq;

	/* GMAC descriptors have full 64-bit address pointers */
	dma_set_coherent_mask( &dev->dev, DMA_BIT_MASK(64));

	/* Set MAC address */
	amac_set_hw_addr( dev );


	/* Initialize rings */
	priv->tx_ring.count = GMAC_TX_DESC_COUNT;
	priv->rx_ring.count = GMAC_RX_DESC_COUNT;
	priv->rx_ring.ix_in = priv->rx_ring.ix_out = 0;
	priv->tx_ring.ix_in = priv->tx_ring.ix_out = 0;

	priv->rx_desc_start = dma_alloc_coherent( & dev->dev,
				priv->rx_ring.count << GMAC_DESC_SIZE_SHIFT,
				&priv->rx_desc_paddr, GFP_KERNEL );

	if( IS_ERR_OR_NULL( priv->rx_desc_start ))
		goto _fail_desc_alloc;

	/* Verify the descriptors are aligned as needed */
	if( priv->rx_ring.count <=256)
		BUG_ON( priv->rx_desc_paddr & (SZ_4K-1));
	else
		BUG_ON( priv->rx_desc_paddr & (SZ_8K-1));

	priv->tx_desc_start = dma_alloc_coherent( & dev->dev,
				priv->tx_ring.count << GMAC_DESC_SIZE_SHIFT,
				&priv->tx_desc_paddr, GFP_KERNEL );

	if( IS_ERR_OR_NULL( priv->tx_desc_start ))
		goto _fail_desc_alloc;

	/* Verify the descriptors are aligned as needed */
	if( priv->tx_ring.count <=256)
		BUG_ON( priv->tx_desc_paddr & (SZ_4K-1));
	else
		BUG_ON( priv->tx_desc_paddr & (SZ_8K-1));

	/* Initialize descriptors */
	memset( priv->tx_desc_start, 0,
		priv->tx_ring.count << GMAC_DESC_SIZE_SHIFT );
	memset( priv->rx_desc_start, 0,
		priv->rx_ring.count << GMAC_DESC_SIZE_SHIFT );

	/* Mark last descriptors with EOT */
	desc = priv->tx_desc_start +
		((priv->tx_ring.count-1)<<GMAC_DESC_SIZE_SHIFT);
	desc->desc_eot = 1 ;

	desc = priv->rx_desc_start +
		((priv->rx_ring.count-1)<<GMAC_DESC_SIZE_SHIFT);
	desc->desc_eot = 1 ;

	/* Alloc auxiliary <skb> pointer arrays */
	priv->rx_skb = kmalloc( sizeof(struct sk_buff *) * priv->rx_ring.count,
		GFP_KERNEL );
	priv->tx_skb = kmalloc( sizeof(struct sk_buff *) * priv->tx_ring.count,
		GFP_KERNEL );
	if( NULL == priv->tx_skb || NULL == priv->rx_skb )
		goto _fail_alloc;
	memset( priv->rx_skb, 0, sizeof(struct sk_buff *) * priv->rx_ring.count);
	memset( priv->tx_skb, 0, sizeof(struct sk_buff *) * priv->tx_ring.count);

	/* Enable hardware */
	barrier();

	/* Write physical address of descriptor tables */
	_reg_write( dev, GMAC_RX_ADDR_LOW, priv->rx_desc_paddr );
	_reg_write( dev, GMAC_RX_ADDR_HIGH, (u64)priv->rx_desc_paddr >> 32);
	_reg_write( dev, GMAC_TX_ADDR_LOW(q), priv->tx_desc_paddr );
	_reg_write( dev, GMAC_TX_ADDR_HIGH(q), (u64)priv->tx_desc_paddr >> 32);

	/* Set Other Rx control parameters */
	_reg_write( dev, GMAC_RXCTL, 0);
	_reg_write( dev, GMAC_RXCTL_RX_OFFSET, sizeof( gmac_rxstat_t ));
	_reg_write( dev, GMAC_RXCTL_OFLOW_CONT, 1);
	_reg_write( dev, GMAC_RXCTL_PARITY_DIS, 0);
	_reg_write( dev, GMAC_RXCTL_BURST_LEN, 1);	/* 32-bytes */

#ifdef	_EXTRA_DEBUG
	printk("UniMAC config=%#x mac stat %#x\n",
		_reg_read(dev, UMAC_CONFIG), _reg_read(dev, UMAC_MAC_STAT));
#endif

	/* Enable UniMAC */
	_reg_write( dev, UMAC_FRM_LENGTH, AMAC_MAX_PACKET);

	_reg_write( dev, UMAC_CONFIG_ETH_SPEED, 2);	/* 1Gbps */
	_reg_write( dev, UMAC_CONFIG_CRC_FW, 0);
	_reg_write( dev, UMAC_CONFIG_LNGTHCHK_DIS, 1);
	_reg_write( dev, UMAC_CONFIG_CNTLFRM_EN, 0);
	_reg_write( dev, UMAC_CONFIG_PROMISC, 0);
	_reg_write( dev, UMAC_CONFIG_LCL_LOOP_EN, 0);
	_reg_write( dev, UMAC_CONFIG_RMT_LOOP_EN, 0);
	_reg_write( dev, UMAC_CONFIG_TXRX_AUTO_EN, 0);
	_reg_write( dev, UMAC_CONFIG_AUTO_EN, 0);
	_reg_write( dev, UMAC_CONFIG_TX_ADDR_INS, 0);
	_reg_write( dev, UMAC_CONFIG_PAD_EN, 0);
	_reg_write( dev, UMAC_CONFIG_PREAMB_EN, 0);
	_reg_write( dev, UMAC_CONFIG_TX_EN, 1);
	_reg_write( dev, UMAC_CONFIG_RX_EN, 1);

	/* Configure GMAC */
	_reg_write( dev, GMAC_CTL_FLOW_CNTLSRC, 0);
	_reg_write( dev, GMAC_CTL_RX_OVFLOW_MODE, 0);
	_reg_write( dev, GMAC_CTL_MIB_RESET, 0);
	_reg_write( dev, GMAC_CTL_LINKSTAT_SEL, 1);
	_reg_write( dev, GMAC_CTL_FLOW_CNTL_MODE, 0);
	_reg_write( dev, GMAC_CTL_NWAY_AUTO_POLL, 1);

	/* Set Tx DMA control bits */
	_reg_write( dev, GMAC_TXCTL(q), 0);
	_reg_write( dev, GMAC_TXCTL_PARITY_DIS(q), 0);
	_reg_write( dev, GMAC_TXCTL_BURST_LEN(q), 1);	/* 32-bytes */
	_reg_write( dev, GMAC_TXCTL_DNA_ACT_INDEX(q), 1);/* for debug */

	/* Enable Tx DMA */
	_reg_write( dev, GMAC_TXCTL_TX_EN(q), 1);

	/* Enable Rx DMA */
	_reg_write( dev, GMAC_RXCTL_RX_EN, 1);

	/* Fill Rx queue - works only after Rx DMA is enabled */
	amac_rx_fill( dev, 64*2 );

	/* Install NAPI instance */
	netif_napi_add( dev, &priv->rx_napi, amac_rx_poll, 64 );
	netif_napi_add( dev, &priv->tx_napi, amac_tx_poll, 64 );

	/* Enable NAPI right away */
	napi_enable( &priv->rx_napi );
	napi_enable( &priv->tx_napi );

	/* Enable interrupts */
	_reg_write( dev, GMAC_INTMASK_RX_INT, 1);
	_reg_write( dev, GMAC_INTMASK_TX_INT(q), 1);
	_reg_write( dev, GMAC_INTMASK_MII_LINK_CHANGE, 1);
	_reg_write( dev, GMAC_INTMASK_SW_LINK_CHANGE, 1);
	_reg_write( dev, GMAC_INTMASK_DMA_DESC_ERR, 1);
	_reg_write( dev, GMAC_INTMASK_DMA_DATA_ERR, 1);
	_reg_write( dev, GMAC_INTMASK_DMA_PROTO_ERR, 1);
	_reg_write( dev, GMAC_INTMASK_DMA_RX_UNDERFLOW, 1);
	_reg_write( dev, GMAC_INTMASK_DMA_RX_OVERFLOW, 1);
	_reg_write( dev, GMAC_INTMASK_DMA_TX_UNDERFLOW, 1);
	/* _reg_write( dev, GMAC_INTMASK_TIMER_INT, 1); */

	/* Setup lazy Rx interrupts (TBD: ethtool coalesce ?) */
	_reg_write( dev, GMAC_INTRX_LZY_TIMEOUT, 125000 );
	_reg_write( dev, GMAC_INTRX_LZY_FRMCNT, 16 );

	/* Ready to transmit packets */
	netif_start_queue( dev );

	return res;

	/* Various failure exits */
_fail_alloc:
	if( NULL != priv->tx_skb )
		kfree( priv->tx_skb );
	if( NULL != priv->rx_skb )
		kfree( priv->rx_skb );

_fail_desc_alloc:
	printk(KERN_WARNING "%s: failed to allocate memory\n", dev->name );
	if( NULL != priv->rx_desc_start )
		dma_free_coherent( &dev->dev,
				priv->rx_ring.count << GMAC_DESC_SIZE_SHIFT,
				priv->rx_desc_start, priv->rx_desc_paddr );

	if( NULL != priv->tx_desc_start )
		dma_free_coherent( &dev->dev,
				priv->tx_ring.count << GMAC_DESC_SIZE_SHIFT,
				priv->tx_desc_start, priv->tx_desc_paddr );

	free_irq( dev->irq, dev );

_fail_irq:
	return -ENODEV;
}

static int amac_stop(struct net_device *dev)
{
	struct amac_priv * priv = netdev_priv(dev);
	const unsigned q = 0;
	unsigned ix;

	/* Stop accepting packets for transmission */
	netif_stop_queue( dev );

	/* Flush Tx FIFO */
	_reg_write( dev, GMAC_CTL_TX_FLUSH, 1);
	ndelay(1);


	/* Stop hardware */

	/* Disable Rx DMA */
	_reg_write( dev, GMAC_RXCTL_RX_EN, 0);

	/* Disable Tx DMA */
	_reg_write( dev, GMAC_TXCTL_TX_EN(q), 0);

	/* Disable all interrupts */
	_reg_write( dev, GMAC_INTMASK, 0);
	barrier();

	/* Verify DMA has indeed stopped */
	amac_tx_show( dev );

	/* Stop NAPI processing */
	napi_disable( &priv->rx_napi );
	napi_disable( &priv->tx_napi );

	/* Stop UniMAC */
	_reg_write( dev, UMAC_CONFIG_TX_EN, 0);
	_reg_write( dev, UMAC_CONFIG_RX_EN, 0);
	_reg_write( dev, UMAC_CONFIG_SW_RESET, 1);

	netif_napi_del( &priv->tx_napi );
	netif_napi_del( &priv->rx_napi );

	/* Unmap any mapped DMA buffers */
	for(ix = 0; ix < priv->tx_ring.count; ix ++ ) {
		gmac_desc_t * tx_desc =
			priv->tx_desc_start + (ix<<GMAC_DESC_SIZE_SHIFT);

		if( tx_desc->desc_data_ptr )
			dma_unmap_single( &dev->dev, tx_desc->desc_data_ptr,
				tx_desc->desc_buf_sz, DMA_TO_DEVICE );
	}

	for(ix = 0; ix < priv->rx_ring.count; ix ++ ) {
		gmac_desc_t * rx_desc =
			priv->rx_desc_start + (ix<<GMAC_DESC_SIZE_SHIFT);

		if( rx_desc->desc_data_ptr )
			dma_unmap_single( &dev->dev, rx_desc->desc_data_ptr,
				rx_desc->desc_buf_sz, DMA_FROM_DEVICE );
	}

	/* Free <skb> that may be pending in the queues */
	for(ix = 0; ix < priv->tx_ring.count; ix ++ )
		if( priv->tx_skb[ ix ] )
			dev_kfree_skb( priv->tx_skb[ ix ] );

	for(ix = 0; ix < priv->rx_ring.count; ix ++ )
		if( priv->rx_skb[ ix ] )
			dev_kfree_skb( priv->rx_skb[ ix ] );

	/* Free auxiliary <skb> pointer arrays */
	if( NULL != priv->tx_skb )
		kfree( priv->tx_skb );
	if( NULL != priv->rx_skb )
		kfree( priv->rx_skb );

	/* Free DMA descriptors */
	dma_free_coherent( &dev->dev,
		priv->tx_ring.count << GMAC_DESC_SIZE_SHIFT,
		priv->tx_desc_start, priv->tx_desc_paddr );

	dma_free_coherent( &dev->dev,
		priv->rx_ring.count << GMAC_DESC_SIZE_SHIFT,
		priv->rx_desc_start, priv->rx_desc_paddr );

	priv->tx_ring.count = priv->rx_ring.count = 0;

	/* Release interrupt */
	free_irq( dev->irq, dev );

	_reg_write( dev, UMAC_CONFIG_SW_RESET, 0);
	return 0;
}

/*
 * Network device methods
 */
static struct net_device_ops amac_dev_ops = {
	.ndo_open	=	amac_open,
	.ndo_stop	=	amac_stop,
	.ndo_start_xmit	=	amac_start_xmit,
	.ndo_tx_timeout	=	amac_tx_timeout,
	.ndo_select_queue =	amac_select_queue,
	.ndo_set_multicast_list=amac_set_mc_list,
	.ndo_change_rx_flags =	amac_change_rx_flags,
	.ndo_get_stats64 =	amac_get_stats64,
	.ndo_set_mac_address =	eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =	amac_poll_controller,
#endif
};

static void __devinit amac_default_mac_addr( struct net_device * dev, u8 unit )
{
	static const u8 def_hw_addr[] =
		{ 0x80, 0xde, 0xad, 0xfa, 0xce, 0x00 };

	u32 hw_addr[2];

	/* Get pre-set MAC address */
	hw_addr[0] = _reg_read( dev, UMAC_MACADDR_LOW );
	hw_addr[1] = _reg_read( dev, UMAC_MACADDR_HIGH );
	dev->perm_addr[0] = hw_addr[0] >> 24 ;
	dev->perm_addr[1] = hw_addr[0] >> 16 ;
	dev->perm_addr[2] = hw_addr[0] >> 8 ;
	dev->perm_addr[3] = hw_addr[0] ;
	dev->perm_addr[4] = hw_addr[1] >> 8 ;
	dev->perm_addr[5] = hw_addr[1] ;

	if( ! is_valid_ether_addr( &dev->perm_addr[0] ) ) {
		memcpy( &dev->perm_addr, def_hw_addr, 6 );
		dev->perm_addr[5] ^= unit ;
		}

	/* Copy into the active address; dev_addr is managed storage,
	 * so it must not be re-pointed at perm_addr */
	memcpy( dev->dev_addr, dev->perm_addr, ETH_ALEN );

	printk(KERN_INFO "%s: MAC addr %02x-%02x-%02x-%02x-%02x-%02x Driver: %s\n",
		dev->name,
		dev->perm_addr[0], dev->perm_addr[1], dev->perm_addr[2],
		dev->perm_addr[3], dev->perm_addr[4], dev->perm_addr[5],
		"$Id: bcm5301x_amac.c 332917 2012-05-11 20:15:35Z $"
		);

}

static int __devinit amac_dev_init( struct net_device * dev, unsigned unit )
{
	struct amac_priv * priv = netdev_priv(dev);
	void __iomem * base;
	int res;

	/* Reserve resources */
	res = request_resource( &iomem_resource, &amac_regs[ unit ] );
	if( res != 0) return res ;

	/* map registers in virtual space */
	base = ioremap(  amac_regs[ unit ].start,
			resource_size( &amac_regs[ unit ] ));

	if( IS_ERR_OR_NULL(base) ) {
		/* Release resources; <res> is still 0 here, so return a
		 * real error code */
		release_resource( &amac_regs[ unit ] );
		return -ENOMEM;
	}

	dev->base_addr = (u32) base ;
	dev->irq = amac_irqs[ unit ].start ;

	/* Install device methods */
	dev->netdev_ops = & amac_dev_ops;
	/* Install ethtool methods */
	/* TBD */

	/* Declare features */
	dev->features |= /* NETIF_F_SG | */ NETIF_F_HIGHDMA ;

	/* Private vars */
	memset( priv, 0, sizeof( struct amac_priv ));
	priv->unit = unit ;

	/* Init spinlock */
	spin_lock_init( & priv->tx_lock );
	spin_lock_init( & priv->rx_lock );

	/* MAC address */
	amac_default_mac_addr( dev, unit );

	return 0;
}

static int __init amac_init(void)
{
	struct net_device *dev;
	unsigned i;
	int res = 0;

	/* Sanity checks of descriptor layout (BUG at boot if wrong) */

	if( 1 ) {
		gmac_desc_t desc;
		gmac_rxstat_t rxs;
		u32 r;
		u32 d[ 4 ];

		BUG_ON( sizeof( gmac_rxstat_t) != sizeof( u32 ) );
		BUG_ON( sizeof( gmac_desc_t) != 1 << GMAC_DESC_SIZE_SHIFT);

		memset( &d, 0, sizeof(d) );
		memset( &desc, 0, sizeof(desc) );
		memset( &rxs, 0, sizeof(rxs) );

		rxs.rxstat_desc_cnt = 0xa ;
		r = 0xa << 24 ;
		BUG_ON(memcmp( &rxs, &r, sizeof(rxs)));

		desc.desc_buf_sz = 0x400 ;
		d[1] = 0x400 ;
		BUG_ON( memcmp( & desc, &d, sizeof(desc)));
	}

	/* Create and register all instances */
	for(i = 0; i < 4; i ++ ) {
		dev = alloc_etherdev( sizeof( struct amac_priv ) );
		if( dev == NULL )	/* alloc_etherdev() returns NULL on failure */
			return -ENOMEM;
		res = amac_dev_init( dev, i );
		if( res != 0 ){
			printk(KERN_WARNING "%s: failed to initialize %d\n",
				dev->name, res );
			free_netdev( dev );
			continue;
			}
		res = register_netdev( dev );
		if( res != 0 ){
			printk(KERN_WARNING "%s: failed to register %d\n",
				dev->name, res );
			free_netdev( dev );
		}
	}

	return res;
}

device_initcall(amac_init);
