/*
 *  Atheros AR71xx built-in ethernet mac driver
 *
 *  Copyright (c) 2013 The Linux Foundation. All rights reserved.
 *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
 *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
 *
 *  Based on Atheros' AG7100 driver
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as published
 *  by the Free Software Foundation.
 */

#include "ag71xx.h"
#ifdef CONFIG_OF
#include <linux/of.h>
#include <linux/of_platform.h>
#endif

#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	| NETIF_MSG_PROBE		\
	| NETIF_MSG_LINK		\
	| NETIF_MSG_TIMER		\
	| NETIF_MSG_IFDOWN		\
	| NETIF_MSG_IFUP		\
	| NETIF_MSG_RX_ERR		\
	| NETIF_MSG_TX_ERR)

static int ag71xx_msg_level = -1;

module_param_named(msg_level, ag71xx_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

static void ag71xx_dump_dma_regs(struct ag71xx *ag)
{
	DBG("%s: dma_tx_ctrl=%08x, dma_tx_desc=%08x, dma_tx_status=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_TX_CTRL),
		ag71xx_rr(ag, AG71XX_REG_TX_DESC),
		ag71xx_rr(ag, AG71XX_REG_TX_STATUS));

	DBG("%s: dma_rx_ctrl=%08x, dma_rx_desc=%08x, dma_rx_status=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_RX_CTRL),
		ag71xx_rr(ag, AG71XX_REG_RX_DESC),
		ag71xx_rr(ag, AG71XX_REG_RX_STATUS));
}

static void ag71xx_dump_regs(struct ag71xx *ag)
{
	DBG("%s: mac_cfg1=%08x, mac_cfg2=%08x, ipg=%08x, hdx=%08x, mfl=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_MAC_CFG1),
		ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
		ag71xx_rr(ag, AG71XX_REG_MAC_IPG),
		ag71xx_rr(ag, AG71XX_REG_MAC_HDX),
		ag71xx_rr(ag, AG71XX_REG_MAC_MFL));
	DBG("%s: mac_ifctl=%08x, mac_addr1=%08x, mac_addr2=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
		ag71xx_rr(ag, AG71XX_REG_MAC_ADDR1),
		ag71xx_rr(ag, AG71XX_REG_MAC_ADDR2));
	DBG("%s: fifo_cfg0=%08x, fifo_cfg1=%08x, fifo_cfg2=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));
	DBG("%s: fifo_cfg3=%08x, fifo_cfg4=%08x, fifo_cfg5=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));
}

static inline void ag71xx_dump_intr(struct ag71xx *ag, const char *label, u32 intr)
{
	DBG("%s: %s intr=%08x %s%s%s%s%s%s\n",
		ag->dev->name, label, intr,
		(intr & AG71XX_INT_TX_PS) ? "TXPS " : "",
		(intr & AG71XX_INT_TX_UR) ? "TXUR " : "",
		(intr & AG71XX_INT_TX_BE) ? "TXBE " : "",
		(intr & AG71XX_INT_RX_PR) ? "RXPR " : "",
		(intr & AG71XX_INT_RX_OF) ? "RXOF " : "",
		(intr & AG71XX_INT_RX_BE) ? "RXBE " : "");
}

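/*
 * Descriptor rings.  The hardware descriptors live in a single DMA-coherent
 * block, and each descriptor is padded out to a full cache line so the MAC
 * and the CPU never share one.  ring->buf is a parallel, CPU-only array
 * carrying the skb and bookkeeping for each slot.  Ring sizes are powers of
 * two, so wrapping is done with "index & ring->mask".
 */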
static void ag71xx_ring_free(struct ag71xx_ring *ring)
{
	kfree(ring->buf);

	if (ring->descs_cpu)
		dma_free_coherent(NULL, ring->size * ring->desc_size,
				  ring->descs_cpu, ring->descs_dma);
}

static int ag71xx_ring_alloc(struct ag71xx_ring *ring)
{
	int i;

	ring->desc_size = sizeof(struct ag71xx_desc);
	if (ring->desc_size % cache_line_size()) {
		DBG("ag71xx: ring %p, desc size %u rounded to %u\n",
			ring, ring->desc_size,
			roundup(ring->desc_size, cache_line_size()));
		ring->desc_size = roundup(ring->desc_size, cache_line_size());
	}

	ring->descs_cpu = dma_alloc_coherent(NULL, ring->size * ring->desc_size,
					     &ring->descs_dma, GFP_ATOMIC);
	if (!ring->descs_cpu) {
		return -ENOMEM;
	}

	ring->buf = kzalloc(ring->size * sizeof(*ring->buf), GFP_KERNEL);
	if (!ring->buf) {
		return -ENOMEM;
	}

	for (i = 0; i < ring->size; i++) {
		int idx = i * ring->desc_size;
		ring->buf[i].desc = (struct ag71xx_desc *)&ring->descs_cpu[idx];
		DBG("ag71xx: ring %p, desc %d at %p\n",
			ring, i, ring->buf[i].desc);
	}

	return 0;
}

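/*
 * Drop any packets still sitting in the TX ring.  Used on shutdown and
 * reset paths: descriptors the hardware never completed are forced back to
 * empty and counted as TX errors, and the byte/packet totals are reported
 * to the byte-queue-limits layer so it stays in sync.
 */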
static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct net_device *dev = ag->dev;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;
	unsigned int dirty = ring->dirty;
	unsigned int mask = ring->mask;
	unsigned int used = ring->used;

	if (!ring->buf) {
		return;
	}

	while (used) {
		struct ag71xx_buf *buf = &ring->buf[dirty];
		struct ag71xx_desc *desc = buf->desc;
		struct sk_buff *skb;

		/*
		 * If the descriptor is not marked as empty then mark it as
		 * empty and record a TX error.
		 */
		if (!(desc->ctrl & DESC_EMPTY)) {
			desc->ctrl = DESC_EMPTY;
			dev->stats.tx_errors++;
		}

		skb = buf->skb;
		buf->skb = NULL;

		bytes_compl += skb->len;
		pkts_compl++;
		dev_kfree_skb(skb);

		dirty++;
		dirty &= mask;

		used--;
	}

	ring->dirty = dirty;
	ring->used = used;

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

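/*
 * Link every TX descriptor's "next" pointer to the physical address of its
 * successor (wrapping at the end) and mark all slots empty, leaving the
 * ring as one circular chain the DMA engine can follow.
 */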
static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	unsigned int mask = ring->mask;
	unsigned int size = ring->size;
	int i;

	for (i = 0; i < size; i++) {
		struct ag71xx_buf *buf = &ring->buf[i];
		struct ag71xx_desc *desc = buf->desc;

		desc->next = (u32)(ring->descs_dma +
				   ring->desc_size * ((i + 1) & mask));

		desc->ctrl = DESC_EMPTY;
		buf->skb = NULL;
	}

	ring->curr = 0;
	ring->dirty = 0;
	ring->used = 0;
	netdev_reset_queue(ag->dev);
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct net_device *dev = ag->dev;
	int i;

	if (!ring->buf) {
		return;
	}

	for (i = 0; i < ring->size; i++) {
		struct ag71xx_buf *buf = &ring->buf[i];
		struct sk_buff *skb = buf->skb;

		if (skb) {
			dma_unmap_single(&dev->dev, buf->dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			buf->skb = NULL;
		}
	}
}

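/*
 * Fill the RX ring: allocate and DMA-map one skb per slot, point each
 * descriptor at its buffer and mark it empty.  The RX ring starts out
 * completely owned by the hardware, hence used == size on success.
 */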
static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct net_device *dev = ag->dev;
	unsigned int mask = ring->mask;
	unsigned int size = ring->size;
	unsigned int rx_buf_size = ag->rx_buf_size;
	unsigned int rx_buf_offset = ag->rx_buf_offset;
	unsigned int i;

	for (i = 0; i < size; i++) {
		struct ag71xx_buf *buf = &ring->buf[i];
		struct ag71xx_desc *desc = buf->desc;
		struct sk_buff *skb;

		desc->next = (u32)(ring->descs_dma +
				   ring->desc_size * ((i + 1) & mask));

		skb = dev_alloc_skb(rx_buf_size);
		if (unlikely(!skb)) {
			return -ENOMEM;
		}

		skb_reserve(skb, rx_buf_offset);

		buf->skb = skb;
		buf->dma_addr = dma_map_single(&dev->dev, skb->data,
					       rx_buf_size, DMA_FROM_DEVICE);

		desc->data = (u32)buf->dma_addr;
		desc->ctrl = DESC_EMPTY;
	}

	ring->curr = 0;
	ring->dirty = 0;
	ring->used = size;

	return 0;
}

static int ag71xx_rings_init(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_ring_alloc(&ag->tx_ring);
	if (ret)
		return ret;

	ag71xx_ring_tx_init(ag);

	ret = ag71xx_ring_alloc(&ag->rx_ring);
	if (ret)
		return ret;

	ret = ag71xx_ring_rx_init(ag);
	return ret;
}

static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_free(&ag->rx_ring);

	ag71xx_ring_tx_clean(ag);
	netdev_reset_queue(ag->dev);
	ag71xx_ring_free(&ag->tx_ring);
}

static const char *ag71xx_speed_str(struct ag71xx *ag)
{
	switch (ag->speed) {
	case SPEED_1000:
		return "1000";
	case SPEED_100:
		return "100";
	case SPEED_10:
		return "10";
	}

	return "?";
}

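/*
 * The MAC address registers hold the station address as two words: ADDR1
 * carries bytes 2..5 (byte 5 in the top octet) and ADDR2 carries bytes
 * 0..1 in its upper half, which is why the bytes are packed in reverse.
 */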
static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
	    | (((u32)mac[3]) << 8) | ((u32)mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}

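/*
 * ag->stop_desc (programmed into the descriptor registers below) is a
 * single dummy descriptor whose "next" pointer refers back to itself.
 * The apparent intent is to park the DMA engine on a harmless, self-linked
 * entry while the real rings are being torn down or rebuilt.
 */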
static void ag71xx_dma_reset(struct ag71xx *ag)
{
	u32 val;
	int i;

	ag71xx_dump_dma_regs(ag);

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/*
	 * give the hardware some time to really stop all rx/tx activity
	 * clearing the descriptors too early causes random memory corruption
	 */
	mdelay(1);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		pr_alert("%s: unable to clear DMA Rx status: %08x\n",
			 ag->dev->name, val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		pr_alert("%s: unable to clear DMA Tx status: %08x\n",
			 ag->dev->name, val);

	ag71xx_dump_dma_regs(ag);
}

#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)

static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}

static void ag71xx_hw_setup(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);

	/* setup MAC configuration registers */
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_INIT);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length */
	if (ag->dev->mtu < AG71XX_TX_MTU_LEN)
		ag71xx_wr(ag, AG71XX_REG_MAC_MFL, AG71XX_TX_MTU_LEN);
	else
		ag71xx_wr(ag, AG71XX_REG_MAC_MFL, ag->dev->mtu);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	if (pdata->is_ar724x) {
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, pdata->fifo_cfg1);
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, pdata->fifo_cfg2);
	} else {
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0fff0000);
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x00001fff);
	}
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}

static void ag71xx_hw_init(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 reset_mask = pdata->reset_bit;

	ag71xx_hw_stop(ag);

	if (pdata->is_ar724x) {
		u32 reset_phy = reset_mask;

		reset_phy &= AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY;
		reset_mask &= ~(AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY);

		ath79_device_reset_set(reset_phy);
		mdelay(50);
		ath79_device_reset_clear(reset_phy);
		mdelay(200);
	}

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	udelay(20);

	ath79_device_reset_set(reset_mask);
	mdelay(100);
	ath79_device_reset_clear(reset_mask);
	mdelay(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}

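/*
 * Lightweight reset used on AR724x link changes: save the MII clock
 * configuration and the current descriptor pointers, pulse only the MAC
 * reset lines, then restore everything so the rings survive the reset and
 * a full ag71xx_stop()/ag71xx_open() cycle is not needed.
 */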
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	u32 reset_mask = pdata->reset_bit;
	u32 rx_ds, tx_ds;
	u32 mii_reg;

	reset_mask &= AR71XX_RESET_GE0_MAC | AR71XX_RESET_GE1_MAC;

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
	tx_ds = ag71xx_rr(ag, AG71XX_REG_TX_DESC);

	ath79_device_reset_set(reset_mask);
	udelay(10);
	ath79_device_reset_clear(reset_mask);
	udelay(10);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, tx_ds);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}

static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);
}

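/*
 * Called from the PHY layer whenever the link state changes.  Reprograms
 * the interface mode, duplex and speed-dependent FIFO thresholds, then
 * restarts the RX engine and re-enables interrupts.
 */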
void ag71xx_link_adjust(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;

	if (!ag->link) {
		ag71xx_hw_stop(ag);
		netif_carrier_off(ag->dev);
		if (netif_msg_link(ag))
			pr_info("%s: link down\n", ag->dev->name);
		return;
	}

	if (pdata->is_ar724x)
		ag71xx_fast_reset(ag);

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (ag->speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		BUG();
		return;
	}

	if (pdata->is_ar91xx)
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, 0x00780fff);
	else if (pdata->is_ar724x)
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, pdata->fifo_cfg3);
	else
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, 0x008001ff);

	if (pdata->set_speed)
		pdata->set_speed(ag->speed);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
	ag71xx_hw_start(ag);

	netif_carrier_on(ag->dev);
	if (netif_msg_link(ag))
		pr_info("%s: link up (%sMbps/%s duplex)\n",
			ag->dev->name,
			ag71xx_speed_str(ag),
			(DUPLEX_FULL == ag->duplex) ? "Full" : "Half");

	DBG("%s: fifo_cfg0=%#x, fifo_cfg1=%#x, fifo_cfg2=%#x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));

	DBG("%s: fifo_cfg3=%#x, fifo_cfg4=%#x, fifo_cfg5=%#x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));

	DBG("%s: mac_cfg2=%#x, mac_ifctl=%#x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
		ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL));
}

static int ag71xx_open(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	int ret;

	/*
	 * Compute the RX buffer size.
	 */
	if (dev->mtu > AG71XX_TX_MTU_LEN) {
		ag->rx_buf_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN +
				  VLAN_HLEN + NET_SKB_PAD + NET_IP_ALIGN;
	} else {
		ag->rx_buf_size = AG71XX_RX_BUF_SIZE;
	}

	/*
	 * Compute the RX buffer offset.  On AR71xx/AR91xx packets must be
	 * 4-byte aligned.
	 *
	 * When using builtin AR8216 support, hardware adds a 2-byte header,
	 * so we don't need any extra alignment in that case.
	 */
	if (!ag71xx_get_pdata(ag)->is_ar724x || ag71xx_has_ar8216(ag)) {
		ag->rx_buf_offset = NET_SKB_PAD;
	} else {
		ag->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN;
	}

	ret = ag71xx_rings_init(ag);
	if (ret) {
		ag71xx_rings_cleanup(ag);
		return ret;
	}

	napi_enable(&ag->napi);

	netif_carrier_off(dev);
	ag71xx_phy_start(ag);

	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);

	netif_start_queue(dev);
	ag->tx_stopped = false;

	return 0;
}

static int ag71xx_stop(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned long flags;

	netif_carrier_off(dev);
	ag71xx_phy_stop(ag);

	spin_lock_irqsave(&ag->lock, flags);

	ag->tx_stopped = true;
	netif_stop_queue(dev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	spin_unlock_irqrestore(&ag->lock, flags);

	ag71xx_rings_cleanup(ag);

	return 0;
}

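/*
 * Hard transmit path.  One descriptor per packet: the skb is DMA-mapped,
 * the length written into the descriptor control word, the ring head
 * advanced, and finally TX_CTRL_TXE kicks the DMA engine.  The queue is
 * stopped as soon as the ring fills; ag71xx_tx_packets() wakes it again.
 */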
static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_ring *ring = &ag->tx_ring;
	unsigned int curr = ring->curr;
	unsigned int mask = ring->mask;
	unsigned int used = ring->used;
	unsigned int size = ring->size;
	struct ag71xx_buf *buf = &ring->buf[curr];
	struct ag71xx_desc *desc = buf->desc;
	unsigned int len;
	dma_addr_t dma_addr;

	/*
	 * The queue should have been stopped before the ring filled up,
	 * so we should never arrive here with a full ring; guard against
	 * it anyway.
	 */
	if (unlikely(used == size)) {
		DBG("%s: tx queue full\n", dev->name);
		ag->tx_stopped = true;
		netif_stop_queue(dev);
		goto err_drop;
	}

	if (unlikely(ag71xx_has_ar8216(ag))) {
		ag71xx_add_ar8216_header(ag, skb);
	}

	len = skb->len;
	if (unlikely(len == 0)) {
		DBG("%s: packet len is too small\n", dev->name);
		goto err_drop;
	}

	dma_addr = dma_map_single(&dev->dev, skb->data, len, DMA_TO_DEVICE);

	netdev_sent_queue(dev, len);
	buf->skb = skb;
	buf->timestamp = jiffies;

	/* setup descriptor fields */
	desc->data = (u32)dma_addr;
	desc->ctrl = len & DESC_PKTLEN_M;

	curr++;
	curr &= mask;
	ring->curr = curr;

	used++;
	ring->used = used;

	/*
	 * If our transmit ring is full then stop transmitting.
	 */
	if (unlikely(used == size)) {
		DBG("%s: tx queue full\n", ag->dev->name);
		ag->tx_stopped = true;
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop:
	dev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int ag71xx_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ag71xx *ag = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCETHTOOL:
		if (ag->phy_dev == NULL)
			break;

		spin_lock_irq(&ag->lock);
		ret = phy_ethtool_ioctl(ag->phy_dev, (void *) ifr->ifr_data);
		spin_unlock_irq(&ag->lock);
		return ret;

	case SIOCSIFHWADDR:
		/* copy the full 6-byte station address, not sizeof() a pointer */
		if (copy_from_user(dev->dev_addr, ifr->ifr_data, ETH_ALEN))
			return -EFAULT;
		return 0;

	case SIOCGIFHWADDR:
		if (copy_to_user(ifr->ifr_data, dev->dev_addr, ETH_ALEN))
			return -EFAULT;
		return 0;

	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (ag->phy_dev == NULL)
			break;

		return phy_mii_ioctl(ag->phy_dev, ifr, cmd);

	default:
		break;
	}

	return -EOPNOTSUPP;
}

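/*
 * If the RX refill loop runs out of memory, ag71xx_poll() arms this timer
 * instead of re-enabling interrupts; when it fires we simply reschedule
 * NAPI and try the refill again.
 */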
static void ag71xx_oom_timer_handler(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct ag71xx *ag = netdev_priv(dev);

	napi_schedule(&ag->napi);
}

static void ag71xx_tx_timeout(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	if (netif_msg_tx_err(ag))
		pr_info("%s: tx timeout\n", ag->dev->name);

	schedule_work(&ag->restart_work);
}

static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx, restart_work);

	if (ag71xx_get_pdata(ag)->is_ar724x) {
		ag->link = 0;
		ag71xx_link_adjust(ag);
		return;
	}

	ag71xx_stop(ag->dev);
	ag71xx_open(ag->dev);
}

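/*
 * Heuristic for the AR7240 DMA hang: if a TX buffer has been pending for
 * more than ~100ms (HZ/10) while the link is up, inspect the RX/TX state
 * machine and FIFO depth registers for the known stuck signatures.
 */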
static bool ag71xx_check_dma_stuck(struct ag71xx *ag, unsigned long timestamp)
{
	u32 rx_sm, tx_sm, rx_fd;

	if (likely(time_before(jiffies, timestamp + HZ/10)))
		return false;

	if (!netif_carrier_ok(ag->dev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}

static int ag71xx_tx_packets(struct ag71xx *ag, struct net_device *dev)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	unsigned int sent = 0;
	unsigned int bytes_compl = 0;
	unsigned int dirty = ring->dirty;
	unsigned int mask = ring->mask;
	unsigned int used = ring->used;

	DBG("%s: processing TX ring\n", dev->name);

	while (used) {
		struct ag71xx_buf *buf = &ring->buf[dirty];
		struct ag71xx_desc *desc = buf->desc;
		struct sk_buff *skb;

		if (unlikely(!(desc->ctrl & DESC_EMPTY))) {
			if (unlikely(pdata->is_ar7240)) {
				if (unlikely(ag71xx_check_dma_stuck(ag, buf->timestamp))) {
					schedule_work(&ag->restart_work);
				}
			}
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);

		skb = buf->skb;
		buf->skb = NULL;
		bytes_compl += skb->len;
		sent++;

		dev_kfree_skb(skb);

		dirty++;
		dirty &= mask;

		used--;
	}

	ring->dirty = dirty;
	ring->used = used;

	dev->stats.tx_bytes += bytes_compl;
	dev->stats.tx_packets += sent;

	DBG("%s: %u packets sent out\n", dev->name, sent);

	/*
	 * Mark the amount of work we've done.
	 */
	netdev_completed_queue(dev, sent, bytes_compl);

	/*
	 * If our transmit queue was previously stopped because we'd run out
	 * of space and we've now successfully freed some space then restart
	 * the transmit queue again.
	 */
	if (unlikely(ag->tx_stopped) && sent) {
		netif_wake_queue(dev);
		ag->tx_stopped = false;
	}

	return sent;
}

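/*
 * RX processing runs in two phases: first harvest up to "limit" filled
 * descriptors and push the packets up the stack, then walk the dirty
 * slots and refill them with freshly allocated, DMA-mapped skbs.
 */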
static int ag71xx_rx_packets(struct ag71xx *ag, struct net_device *dev, int limit)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	unsigned int rx_buf_size = ag->rx_buf_size;
	unsigned int rx_buf_offset = ag->rx_buf_offset;
	unsigned int curr = ring->curr;
	unsigned int mask = ring->mask;
	unsigned int used = ring->used;
	unsigned int dirty = ring->dirty;
	unsigned int size = ring->size;
	int done = 0;

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
			dev->name, limit, curr, ring->dirty);

	/*
	 * Don't try to scan more entries than we have.
	 */
	if (limit > used) {
		limit = used;
	}

	/*
	 * Process newly received packets.
	 */
	while (done < limit) {
		struct ag71xx_buf *buf = &ring->buf[curr];
		struct ag71xx_desc *desc = buf->desc;
		u32 desc_ctrl;
		struct sk_buff *skb;
		int pktlen;

		/*
		 * Is our descriptor marked as empty?  If it is then we're done.
		 */
		desc_ctrl = desc->ctrl;
		if (unlikely(desc_ctrl & DESC_EMPTY)) {
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc_ctrl & DESC_PKTLEN_M;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&dev->dev, buf->dma_addr,
				 rx_buf_size, DMA_FROM_DEVICE);

		dev->last_rx = jiffies;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		skb = buf->skb;
		buf->skb = NULL;

		/*
		 * Set up the offset and length of the skb.
		 */
		__skb_put(skb, pktlen);

		/*
		 * Speed up eth_type_trans() since it will inspect the packet payload.
		 */
		prefetch(skb->data);

		if (unlikely(ag71xx_has_ar8216(ag))) {
			int err = ag71xx_remove_ar8216_header(ag, skb, pktlen);
			if (err) {
				dev->stats.rx_dropped++;
				dev_kfree_skb(skb);
				goto next;
			}
		}

		skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);

next:
		curr++;
		curr &= mask;

		done++;
	}

	ring->curr = curr;
	used -= done;

	/*
	 * Replenish the RX buffer entries.
	 */
	while (used < size) {
		struct ag71xx_buf *buf = &ring->buf[dirty];
		struct ag71xx_desc *desc = buf->desc;
		struct sk_buff *skb;

		skb = dev_alloc_skb(rx_buf_size);
		if (unlikely(!skb)) {
			break;
		}

		skb_reserve(skb, rx_buf_offset);

		buf->skb = skb;
		buf->dma_addr = dma_map_single(&dev->dev, skb->data,
					       rx_buf_size, DMA_FROM_DEVICE);

		desc->data = (u32)buf->dma_addr;
		desc->ctrl = DESC_EMPTY;

		dirty++;
		dirty &= mask;

		used++;
	}

	ring->dirty = dirty;
	ring->used = used;

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
		dev->name, ring->curr, ring->dirty, done);

	return done;
}

static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring;
	unsigned long flags;
	u32 status;
	int tx_done;
	int rx_done;

	pdata->ddr_flush();

	tx_done = ag71xx_tx_packets(ag, dev);
	rx_done = ag71xx_rx_packets(ag, dev, limit);

	ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);

	rx_ring = &ag->rx_ring;
	if (unlikely(rx_ring->used != rx_ring->size)) {
		if (netif_msg_rx_err(ag))
			pr_info("%s: out of memory\n", dev->name);

		mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
		napi_complete(napi);
		return 0;
	}

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		DBG("%s: disable polling mode, rx=%d, tx=%d, limit=%d\n",
			dev->name, rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return rx_done;
	}

	DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n",
			dev->name, rx_done, tx_done, limit);
	return rx_done;
}

static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	ag71xx_dump_intr(ag, "raw", status);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		napi_schedule(&ag->napi);
	}

	ag71xx_debugfs_update_int_stats(ag, status);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ag71xx_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	ag71xx_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static int ag71xx_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;

	if (new_mtu < 68 || new_mtu > AG71XX_JUMBO_LEN)
		return -EINVAL;

	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		return 0;
	}

	ag71xx_stop(dev);
	pr_info("%s: %s: new MTU is %d\n", __func__, dev->name, new_mtu);

	dev->mtu = new_mtu;

	ret = ag71xx_open(dev);
	if (ret)
		dev_close(dev);

	return ret;
}

static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= ag71xx_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ag71xx_netpoll,
#endif
};

#ifdef CONFIG_OF
static void ag71xx_of_gmac_setup(struct platform_device *pdev, u32 mask)
{
	struct resource *res;
	void __iomem *cfg_base;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
	if (!res)
		return;

	cfg_base = ioremap_nocache(res->start, resource_size(res));
	if (!cfg_base) {
		dev_err(&pdev->dev, "unable to ioremap cfg_base\n");
		return;
	}

	__raw_writel(__raw_readl(cfg_base) | mask, cfg_base);
	/* flush write */
	(void)__raw_readl(cfg_base);

	iounmap(cfg_base);
}

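/*
 * Populate platform data from the device tree.  The exact binding is
 * vendor-specific; judging only from the parsing below, a matching node
 * might look roughly like this (property values are illustrative, not
 * authoritative):
 *
 *	ethernet@19000000 {
 *		compatible = "qcom,ag71xx-eth";
 *		mdio-handle = <&mdio0>;
 *		eth-cfg = <0x1>;
 *		eth-phy-cfg = <phy_if_mode phy_mask speed duplex>;
 *		eth-fifo-cfg = <fifo_cfg1 fifo_cfg2 fifo_cfg3>;
 *		eth-sw-cfg = <phy4_mii_en phy_poll_mask>;
 *	};
 */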
static int ag71xx_of_pdata_update(struct platform_device *pdev)
{
	u32 value[4];
	const phandle *ph;
	struct device_node *mdio;
	struct platform_device *pdev_mdio;
	struct ag71xx_platform_data *pdata = pdev->dev.platform_data;

	if (!pdev->dev.of_node)
		return -EINVAL;

	ph = of_get_property(pdev->dev.of_node, "mdio-handle", NULL);
	if (!ph) {
		dev_err(&pdev->dev, "No mdio-handle in dtb\n");
		return -EINVAL;
	}

	mdio = of_find_node_by_phandle(*ph);
	if (!mdio) {
		dev_err(&pdev->dev, "No mdio device found by phandle\n");
		return -EINVAL;
	}

	pdev_mdio = of_find_device_by_node(mdio);
	of_node_put(mdio);
	if (!pdev_mdio) {
		dev_err(&pdev->dev, "No platform device for mdio node\n");
		return -EINVAL;
	}
	pdata->mii_bus_dev = &pdev_mdio->dev;

	if (!of_property_read_u32(pdev->dev.of_node, "eth-cfg", &value[0]))
		ag71xx_of_gmac_setup(pdev, value[0]);

	if (pdata->update_pll &&
	    !of_property_read_u32_array(pdev->dev.of_node, "eth-pll-data",
					value, 3))
		pdata->update_pll(value[0], value[1], value[2]);

	if (!of_property_read_u32_array(pdev->dev.of_node, "eth-phy-cfg",
					value, 4)) {
		pdata->phy_if_mode = value[0];
		pdata->phy_mask = value[1];
		pdata->speed = value[2];
		pdata->duplex = value[3];
	}

	if (!of_property_read_u32_array(pdev->dev.of_node, "eth-fifo-cfg",
					value, 3)) {
		pdata->fifo_cfg1 = value[0];
		pdata->fifo_cfg2 = value[1];
		pdata->fifo_cfg3 = value[2];
	}

	if (pdata->switch_data &&
	    !of_property_read_u32_array(pdev->dev.of_node, "eth-sw-cfg",
					value, 2)) {
		pdata->switch_data->phy4_mii_en = value[0];
		pdata->switch_data->phy_poll_mask = value[1];
	}

	return 0;
}
#else
static int ag71xx_of_pdata_update(struct platform_device *pdev)
{
	return -EINVAL;
}
#endif

static int __devinit ag71xx_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct resource *res;
	struct ag71xx *ag;
	struct ag71xx_desc *ag_stop_desc;
	struct ag71xx_platform_data *pdata;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data specified\n");
		err = -ENXIO;
		goto err_out;
	}

	if (pdata->mii_bus_dev == NULL &&
	    ag71xx_of_pdata_update(pdev)) {
		dev_err(&pdev->dev, "no MII bus device specified\n");
		err = -EINVAL;
		goto err_out;
	}

	dev = alloc_etherdev(sizeof(*ag));
	if (!dev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		goto err_out;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	ag = netdev_priv(dev);
	ag->pdev = pdev;
	ag->dev = dev;
	ag->msg_enable = netif_msg_init(ag71xx_msg_level,
					AG71XX_DEFAULT_MSG_ENABLE);
	spin_lock_init(&ag->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac_base");
	if (!res) {
		dev_err(&pdev->dev, "no mac_base resource found\n");
		err = -ENXIO;
		goto err_free_dev;
	}

	ag->mac_base = ioremap_nocache(res->start, resource_size(res));
	if (!ag->mac_base) {
		dev_err(&pdev->dev, "unable to ioremap mac_base\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = request_irq(dev->irq, ag71xx_interrupt,
			  IRQF_DISABLED,
			  dev->name, dev);
	if (err) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
		goto err_unmap_base;
	}

	dev->base_addr = (unsigned long)ag->mac_base;
	dev->netdev_ops = &ag71xx_netdev_ops;
	dev->ethtool_ops = &ag71xx_ethtool_ops;

	INIT_WORK(&ag->restart_work, ag71xx_restart_work_func);

	init_timer(&ag->oom_timer);
	ag->oom_timer.data = (unsigned long) dev;
	ag->oom_timer.function = ag71xx_oom_timer_handler;

	ag->tx_ring.size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->tx_ring.mask = AG71XX_TX_RING_SIZE_DEFAULT - 1;
	ag->rx_ring.size = AG71XX_RX_RING_SIZE_DEFAULT;
	ag->rx_ring.mask = AG71XX_RX_RING_SIZE_DEFAULT - 1;

	ag_stop_desc = dma_alloc_coherent(NULL,
		sizeof(struct ag71xx_desc), &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag_stop_desc) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	/* self-linked dummy descriptor used to park the DMA engine */
	ag_stop_desc->data = 0;
	ag_stop_desc->ctrl = 0;
	ag_stop_desc->next = (u32)ag->stop_desc_dma;
	ag->stop_desc = ag_stop_desc;

	memcpy(dev->dev_addr, pdata->mac_addr, ETH_ALEN);

	netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "unable to register net device\n");
		goto err_free_desc;
	}

	pr_info("%s: Atheros AG71xx at 0x%08lx, irq %d\n",
		dev->name, dev->base_addr, dev->irq);

	ag71xx_dump_regs(ag);

	ag71xx_hw_init(ag);

	ag71xx_dump_regs(ag);

	err = ag71xx_phy_connect(ag);
	if (err)
		goto err_unregister_netdev;

	err = ag71xx_debugfs_init(ag);
	if (err)
		goto err_phy_disconnect;

	platform_set_drvdata(pdev, dev);

	return 0;

err_phy_disconnect:
	ag71xx_phy_disconnect(ag);
err_unregister_netdev:
	unregister_netdev(dev);
err_free_desc:
	dma_free_coherent(NULL, sizeof(struct ag71xx_desc), ag->stop_desc,
			  ag->stop_desc_dma);
err_free_irq:
	free_irq(dev->irq, dev);
err_unmap_base:
	iounmap(ag->mac_base);
err_free_dev:
	free_netdev(dev);
err_out:
	platform_set_drvdata(pdev, NULL);
	return err;
}

static int __devexit ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	if (dev) {
		struct ag71xx *ag = netdev_priv(dev);

		ag71xx_debugfs_exit(ag);
		ag71xx_phy_disconnect(ag);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		iounmap(ag->mac_base);
		free_netdev(dev);
		platform_set_drvdata(pdev, NULL);
	}

	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id ag71xx_of_match_table[] = {
	{.compatible = "qcom,ag71xx-eth"},
	{}
};
#else
#define ag71xx_of_match_table NULL
#endif

static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= __devexit_p(ag71xx_remove),
	.driver = {
		.name	= AG71XX_DRV_NAME,
		.of_match_table = ag71xx_of_match_table,
	}
};

static int __init ag71xx_module_init(void)
{
	int ret;

	ret = ag71xx_debugfs_root_init();
	if (ret)
		goto err_out;

	ret = ag71xx_mdio_driver_init();
	if (ret)
		goto err_debugfs_exit;

	ret = platform_driver_register(&ag71xx_driver);
	if (ret)
		goto err_mdio_exit;

	return 0;

err_mdio_exit:
	ag71xx_mdio_driver_exit();
err_debugfs_exit:
	ag71xx_debugfs_root_exit();
err_out:
	return ret;
}

static void __exit ag71xx_module_exit(void)
{
	platform_driver_unregister(&ag71xx_driver);
	ag71xx_mdio_driver_exit();
	ag71xx_debugfs_root_exit();
}

module_init(ag71xx_module_init);
module_exit(ag71xx_module_exit);

MODULE_VERSION(AG71XX_DRV_VERSION);
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" AG71XX_DRV_NAME);