/*
 *  Atheros AR71xx built-in ethernet mac driver
 *
 *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
 *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
 *
 *  Based on Atheros' AG7100 driver
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as published
 *  by the Free Software Foundation.
 */

#include "ag71xx.h"

#if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
static inline void skb_free_frag(void *data)
{
	put_page(virt_to_head_page(data));
}
#endif

#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	| NETIF_MSG_PROBE		\
	| NETIF_MSG_LINK		\
	| NETIF_MSG_TIMER		\
	| NETIF_MSG_IFDOWN		\
	| NETIF_MSG_IFUP		\
	| NETIF_MSG_RX_ERR		\
	| NETIF_MSG_TX_ERR)

static int ag71xx_msg_level = -1;

module_param_named(msg_level, ag71xx_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define ETH_SWITCH_HEADER_LEN	2

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush);

static inline unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}

static void ag71xx_dump_dma_regs(struct ag71xx *ag)
{
	DBG("%s: dma_tx_ctrl=%08x, dma_tx_desc=%08x, dma_tx_status=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_TX_CTRL),
		ag71xx_rr(ag, AG71XX_REG_TX_DESC),
		ag71xx_rr(ag, AG71XX_REG_TX_STATUS));

	DBG("%s: dma_rx_ctrl=%08x, dma_rx_desc=%08x, dma_rx_status=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_RX_CTRL),
		ag71xx_rr(ag, AG71XX_REG_RX_DESC),
		ag71xx_rr(ag, AG71XX_REG_RX_STATUS));
}

static void ag71xx_dump_regs(struct ag71xx *ag)
{
	DBG("%s: mac_cfg1=%08x, mac_cfg2=%08x, ipg=%08x, hdx=%08x, mfl=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_MAC_CFG1),
		ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
		ag71xx_rr(ag, AG71XX_REG_MAC_IPG),
		ag71xx_rr(ag, AG71XX_REG_MAC_HDX),
		ag71xx_rr(ag, AG71XX_REG_MAC_MFL));
	DBG("%s: mac_ifctl=%08x, mac_addr1=%08x, mac_addr2=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
		ag71xx_rr(ag, AG71XX_REG_MAC_ADDR1),
		ag71xx_rr(ag, AG71XX_REG_MAC_ADDR2));
	DBG("%s: fifo_cfg0=%08x, fifo_cfg1=%08x, fifo_cfg2=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));
	DBG("%s: fifo_cfg3=%08x, fifo_cfg4=%08x, fifo_cfg5=%08x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));
}

static inline void ag71xx_dump_intr(struct ag71xx *ag, char *label, u32 intr)
{
	DBG("%s: %s intr=%08x %s%s%s%s%s%s\n",
		ag->dev->name, label, intr,
		(intr & AG71XX_INT_TX_PS) ? "TXPS " : "",
		(intr & AG71XX_INT_TX_UR) ? "TXUR " : "",
		(intr & AG71XX_INT_TX_BE) ? "TXBE " : "",
		(intr & AG71XX_INT_RX_PR) ? "RXPR " : "",
		(intr & AG71XX_INT_RX_OF) ? "RXOF " : "",
		(intr & AG71XX_INT_RX_BE) ? "RXBE " : "");
}

static void ag71xx_ring_free(struct ag71xx_ring *ring)
{
	int ring_size = BIT(ring->order);

	kfree(ring->buf);

	if (ring->descs_cpu)
		dma_free_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
				  ring->descs_cpu, ring->descs_dma);
}

static int ag71xx_ring_alloc(struct ag71xx_ring *ring)
{
	int ring_size = BIT(ring->order);
	int err;

	ring->descs_cpu = dma_alloc_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
					     &ring->descs_dma, GFP_ATOMIC);
	if (!ring->descs_cpu) {
		err = -ENOMEM;
		goto err;
	}

	ring->buf = kzalloc(ring_size * sizeof(*ring->buf), GFP_KERNEL);
	if (!ring->buf) {
		err = -ENOMEM;
		goto err;
	}

	return 0;

err:
	return err;
}

static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct net_device *dev = ag->dev;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			dev->stats.tx_errors++;
		}

		if (ring->buf[i].skb) {
			bytes_compl += ring->buf[i].len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].skb);
		}
		ring->buf[i].skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = ring_size - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->dev);
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx_buf) {
			dma_unmap_single(&ag->dev->dev, ring->buf[i].dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx_buf);
		}
}

static int ag71xx_buffer_offset(struct ag71xx *ag)
{
	int offset = NET_SKB_PAD;

	/*
	 * On AR71xx/AR91xx packets must be 4-byte aligned.
	 *
	 * When using builtin AR8216 support, hardware adds a 2-byte header,
	 * so we don't need any extra alignment in that case.
	 */
	if (!ag71xx_get_pdata(ag)->is_ar724x || ag71xx_has_ar8216(ag))
		return offset;

	return offset + NET_IP_ALIGN;
}

static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
	void *data;

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx_buf = data;
	buf->dma_addr = dma_map_single(&ag->dev->dev, data, ag->rx_buf_size,
				       DMA_FROM_DEVICE);
	desc->data = (u32) buf->dma_addr + offset;
	return true;
}

static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = BIT(ring->order) - 1;
	unsigned int i;
	int ret;
	int offset = ag71xx_buffer_offset(ag);

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32) (ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		DBG("ag71xx: RX desc at %p, next is %08x\n",
			desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}

static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	unsigned int count;
	int offset = ag71xx_buffer_offset(ag);

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	DBG("%s: %u rx descriptors refilled\n", ag->dev->name, count);

	return count;
}

static int ag71xx_rings_init(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_ring_alloc(&ag->tx_ring);
	if (ret)
		return ret;

	ag71xx_ring_tx_init(ag);

	ret = ag71xx_ring_alloc(&ag->rx_ring);
	if (ret)
		return ret;

	ret = ag71xx_ring_rx_init(ag);
	return ret;
}

static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_free(&ag->rx_ring);

	ag71xx_ring_tx_clean(ag);
	netdev_reset_queue(ag->dev);
	ag71xx_ring_free(&ag->tx_ring);
}

static unsigned char *ag71xx_speed_str(struct ag71xx *ag)
{
	switch (ag->speed) {
	case SPEED_1000:
		return "1000";
	case SPEED_100:
		return "100";
	case SPEED_10:
		return "10";
	}

	return "?";
}

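/*
 * Program the MAC address into the two hardware registers: bytes 5..2 of
 * the address go into MAC_ADDR1, bytes 1..0 into the upper half of
 * MAC_ADDR2.
 */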
static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16)
	  | (((u32) mac[3]) << 8) | ((u32) mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}

static void ag71xx_dma_reset(struct ag71xx *ag)
{
	u32 val;
	int i;

	ag71xx_dump_dma_regs(ag);

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/*
	 * give the hardware some time to really stop all rx/tx activity,
	 * clearing the descriptors too early causes random memory corruption
	 */
	mdelay(1);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		pr_alert("%s: unable to clear DMA Rx status: %08x\n",
			 ag->dev->name, val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		pr_alert("%s: unable to clear DMA Tx status: %08x\n",
			 ag->dev->name, val);

	ag71xx_dump_dma_regs(ag);
}

#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)

static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}

static void ag71xx_hw_setup(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	if (pdata->use_flow_control)
		init |= MAC_CFG1_TFC | MAC_CFG1_RFC;
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	if (pdata->is_ar724x) {
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, pdata->fifo_cfg1);
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, pdata->fifo_cfg2);
	} else {
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0fff0000);
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x00001fff);
	}
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}

static void ag71xx_hw_init(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 reset_mask = pdata->reset_bit;

	ag71xx_hw_stop(ag);

	if (pdata->is_ar724x) {
		u32 reset_phy = reset_mask;

		reset_phy &= AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY;
		reset_mask &= ~(AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY);

		ath79_device_reset_set(reset_phy);
		msleep(50);
		ath79_device_reset_clear(reset_phy);
		msleep(200);
	}

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	udelay(20);

	ath79_device_reset_set(reset_mask);
	msleep(100);
	ath79_device_reset_clear(reset_mask);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}

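/*
 * Quick MAC-only reset used from the link adjust path on AR724x-class SoCs:
 * the MII configuration and the current RX descriptor pointer are saved and
 * restored, the TX ring is flushed, and the MAC address and max frame length
 * are reprogrammed afterwards.
 */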
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	u32 reset_mask = pdata->reset_bit;
	u32 rx_ds;
	u32 mii_reg;

	reset_mask &= AR71XX_RESET_GE0_MAC | AR71XX_RESET_GE1_MAC;

	ag71xx_hw_stop(ag);
	wmb();

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ath79_device_reset_set(reset_mask);
	udelay(10);
	ath79_device_reset_clear(reset_mask);
	udelay(10);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag71xx_tx_packets(ag, true);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->dev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->dev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}

static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->dev);
}

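/*
 * Reprogram MAC_CFG2, MAC_IFCTL and the FIFO thresholds for the current link
 * speed and duplex. When 'update' is set, the link state change is also
 * logged and the optional platform set_speed() callback is invoked.
 */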
static void
__ag71xx_link_adjust(struct ag71xx *ag, bool update)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;
	u32 fifo3;

	if (!ag->link && update) {
		ag71xx_hw_stop(ag);
		netif_carrier_off(ag->dev);
		if (netif_msg_link(ag))
			pr_info("%s: link down\n", ag->dev->name);
		return;
	}

	if (pdata->is_ar724x)
		ag71xx_fast_reset(ag);

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (ag->speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		BUG();
		return;
	}

	if (pdata->is_ar91xx)
		fifo3 = 0x00780fff;
	else if (pdata->is_ar724x)
		fifo3 = pdata->fifo_cfg3;
	else
		fifo3 = 0x008001ff;

	if (ag->tx_ring.desc_split) {
		fifo3 &= 0xffff;
		fifo3 |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, fifo3);

	if (update && pdata->set_speed)
		pdata->set_speed(ag->speed);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
	ag71xx_hw_start(ag);

	netif_carrier_on(ag->dev);
	if (update && netif_msg_link(ag))
		pr_info("%s: link up (%sMbps/%s duplex)\n",
			ag->dev->name,
			ag71xx_speed_str(ag),
			(DUPLEX_FULL == ag->duplex) ? "Full" : "Half");

	DBG("%s: fifo_cfg0=%#x, fifo_cfg1=%#x, fifo_cfg2=%#x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));

	DBG("%s: fifo_cfg3=%#x, fifo_cfg4=%#x, fifo_cfg5=%#x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));

	DBG("%s: mac_cfg2=%#x, mac_ifctl=%#x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
		ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL));
}

void ag71xx_link_adjust(struct ag71xx *ag)
{
	__ag71xx_link_adjust(ag, true);
}

static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->dev);

	return 0;
}

static void ag71xx_hw_disable(struct ag71xx *ag)
{
	unsigned long flags;

	spin_lock_irqsave(&ag->lock, flags);

	netif_stop_queue(ag->dev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	spin_unlock_irqrestore(&ag->lock, flags);

	ag71xx_rings_cleanup(ag);
}

static int ag71xx_open(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;
	int ret;

	netif_carrier_off(dev);
	max_frame_len = ag71xx_max_frame_len(dev->mtu);
	ag->rx_buf_size = SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, dev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	ag71xx_phy_start(ag);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}

static int ag71xx_stop(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	netif_carrier_off(dev);
	ag71xx_phy_stop(ag);
	ag71xx_hw_disable(ag);

	return 0;
}

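/*
 * Fill one or more TX descriptors for a mapped buffer, splitting it according
 * to ring->desc_split and chaining the pieces with DESC_MORE. Returns the
 * number of descriptors used, or -1 if a needed descriptor is still in use.
 */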
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i;
	struct ag71xx_desc *desc;
	int ring_mask = BIT(ring->order) - 1;
	int ndesc = 0;
	int split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/*
			 * TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}

static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;
	int i, n, ring_min;

	if (ag71xx_has_ar8216(ag))
		ag71xx_add_ar8216_header(ag, skb);

	if (skb->len <= 4) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	dma_addr = dma_map_single(&dev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32) dma_addr, skb->len & ag->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].len = skb->len;
	ring->buf[i].skb = skb;
	ring->buf[i].timestamp = jiffies;

	netdev_sent_queue(dev, skb->len);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		DBG("%s: tx queue full\n", dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&dev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	dev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int ag71xx_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ag71xx *ag = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCETHTOOL:
		if (ag->phy_dev == NULL)
			break;

		spin_lock_irq(&ag->lock);
		ret = phy_ethtool_ioctl(ag->phy_dev, (void *) ifr->ifr_data);
		spin_unlock_irq(&ag->lock);
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user
			(dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGIFHWADDR:
		if (copy_to_user
			(ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (ag->phy_dev == NULL)
			break;

		return phy_mii_ioctl(ag->phy_dev, ifr, cmd);

	default:
		break;
	}

	return -EOPNOTSUPP;
}

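/*
 * The OOM timer is armed when RX buffer allocation fails in the poll loop;
 * rescheduling NAPI here simply retries the refill.
 */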
static void ag71xx_oom_timer_handler(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct ag71xx *ag = netdev_priv(dev);

	napi_schedule(&ag->napi);
}

static void ag71xx_tx_timeout(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);

	if (netif_msg_tx_err(ag))
		pr_info("%s: tx timeout\n", ag->dev->name);

	schedule_work(&ag->restart_work);
}

static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx, restart_work);

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);
	if (ag->link)
		__ag71xx_link_adjust(ag, false);
	rtnl_unlock();
}

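/*
 * Heuristic check for a wedged DMA engine: once a TX descriptor has been
 * pending for more than ~100ms with the link up, sample the RX/TX state
 * machine and FIFO depth registers and report known-bad patterns so the
 * caller can schedule a restart.
 */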
static bool ag71xx_check_dma_stuck(struct ag71xx *ag, unsigned long timestamp)
{
	u32 rx_sm, tx_sm, rx_fd;

	if (likely(time_before(jiffies, timestamp + HZ/10)))
		return false;

	if (!netif_carrier_ok(ag->dev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}

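/*
 * Reap completed TX descriptors, free their skbs and update the BQL and
 * device statistics. With 'flush' set, all outstanding descriptors are
 * released regardless of hardware state (used during fast reset).
 */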
static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	int sent = 0;
	int bytes_compl = 0;
	int n = 0;

	DBG("%s: processing TX ring\n", ag->dev->name);

	while (ring->dirty + n != ring->curr) {
		unsigned int i = (ring->dirty + n) & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		struct sk_buff *skb = ring->buf[i].skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (pdata->is_ar724x &&
			    ag71xx_check_dma_stuck(ag, ring->buf[i].timestamp))
				schedule_work(&ag->restart_work);
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		dev_kfree_skb_any(skb);
		ring->buf[i].skb = NULL;

		bytes_compl += ring->buf[i].len;

		sent++;
		ring->dirty += n;

		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	DBG("%s: %d packets sent out\n", ag->dev->name, sent);

	ag->dev->stats.tx_bytes += bytes_compl;
	ag->dev->stats.tx_packets += sent;

	if (!sent)
		return 0;

	netdev_completed_queue(ag->dev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->dev);

	return sent;
}

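/*
 * Receive up to 'limit' packets from the RX ring: unmap each filled buffer,
 * wrap it in an skb via build_skb(), refill the ring, then hand the completed
 * skbs to the stack.
 */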
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
	int offset = ag71xx_buffer_offset(ag);
	unsigned int pktlen_mask = ag->desc_pktlen_mask;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	struct sk_buff_head queue;
	struct sk_buff *skb;
	int done = 0;

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
			dev->name, limit, ring->curr, ring->dirty);

	skb_queue_head_init(&queue);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			ag71xx_assert(0);
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&dev->dev, ring->buf[i].dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		skb = build_skb(ring->buf[i].rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (ag71xx_has_ar8216(ag))
			err = ag71xx_remove_ar8216_header(ag, skb, pktlen);

		if (err) {
			dev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_NONE;
			__skb_queue_tail(&queue, skb);
		}

next:
		ring->buf[i].rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	while ((skb = __skb_dequeue(&queue)) != NULL) {
		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);
	}

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
		dev->name, ring->curr, ring->dirty, done);

	return done;
}

static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	unsigned long flags;
	u32 status;
	int tx_done;
	int rx_done;

	pdata->ddr_flush();
	tx_done = ag71xx_tx_packets(ag, false);

	DBG("%s: processing RX ring\n", dev->name);
	rx_done = ag71xx_rx_packets(ag, limit);

	ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);

	if (rx_ring->buf[rx_ring->dirty % rx_ring_size].rx_buf == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, rx=%d, tx=%d, limit=%d\n",
			dev->name, rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return rx_done;
	}

more:
	DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n",
			dev->name, rx_done, tx_done, limit);
	return limit;

oom:
	if (netif_msg_rx_err(ag))
		pr_info("%s: out of memory\n", dev->name);

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}

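/*
 * Hard interrupt handler: acknowledge bus-error conditions directly and defer
 * RX/TX processing to NAPI by masking the poll interrupts and scheduling the
 * poll handler.
 */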
static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	ag71xx_dump_intr(ag, "raw", status);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		napi_schedule(&ag->napi);
	}

	ag71xx_debugfs_update_int_stats(ag, status);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ag71xx_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	ag71xx_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static int ag71xx_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned int max_frame_len;

	max_frame_len = ag71xx_max_frame_len(new_mtu);
	if (new_mtu < 68 || max_frame_len > ag->max_frame_len)
		return -EINVAL;

	if (netif_running(dev))
		return -EBUSY;

	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= ag71xx_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ag71xx_netpoll,
#endif
};

static const char *ag71xx_get_phy_if_mode_name(phy_interface_t mode)
{
	switch (mode) {
	case PHY_INTERFACE_MODE_MII:
		return "MII";
	case PHY_INTERFACE_MODE_GMII:
		return "GMII";
	case PHY_INTERFACE_MODE_RMII:
		return "RMII";
	case PHY_INTERFACE_MODE_RGMII:
		return "RGMII";
	case PHY_INTERFACE_MODE_SGMII:
		return "SGMII";
	default:
		break;
	}

	return "unknown";
}

static int ag71xx_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct resource *res;
	struct ag71xx *ag;
	struct ag71xx_platform_data *pdata;
	int tx_size, err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data specified\n");
		err = -ENXIO;
		goto err_out;
	}

	if (pdata->mii_bus_dev == NULL && pdata->phy_mask) {
		dev_err(&pdev->dev, "no MII bus device specified\n");
		err = -EINVAL;
		goto err_out;
	}

	dev = alloc_etherdev(sizeof(*ag));
	if (!dev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		goto err_out;
	}

	if (!pdata->max_frame_len || !pdata->desc_pktlen_mask)
		return -EINVAL;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ag = netdev_priv(dev);
	ag->pdev = pdev;
	ag->dev = dev;
	ag->msg_enable = netif_msg_init(ag71xx_msg_level,
					AG71XX_DEFAULT_MSG_ENABLE);
	spin_lock_init(&ag->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac_base");
	if (!res) {
		dev_err(&pdev->dev, "no mac_base resource found\n");
		err = -ENXIO;
		goto err_out;
	}

	ag->mac_base = ioremap_nocache(res->start, res->end - res->start + 1);
	if (!ag->mac_base) {
		dev_err(&pdev->dev, "unable to ioremap mac_base\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = request_irq(dev->irq, ag71xx_interrupt,
			  0x0,
			  dev->name, dev);
	if (err) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
		goto err_unmap_base;
	}

	dev->base_addr = (unsigned long)ag->mac_base;
	dev->netdev_ops = &ag71xx_netdev_ops;
	dev->ethtool_ops = &ag71xx_ethtool_ops;

	INIT_WORK(&ag->restart_work, ag71xx_restart_work_func);

	init_timer(&ag->oom_timer);
	ag->oom_timer.data = (unsigned long) dev;
	ag->oom_timer.function = ag71xx_oom_timer_handler;

	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

	ag->max_frame_len = pdata->max_frame_len;
	ag->desc_pktlen_mask = pdata->desc_pktlen_mask;

	if (!pdata->is_ar724x && !pdata->is_ar91xx) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
	}
	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

	ag->stop_desc = dma_alloc_coherent(NULL,
		sizeof(struct ag71xx_desc), &ag->stop_desc_dma, GFP_KERNEL);

	if (!ag->stop_desc)
		goto err_free_irq;

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32) ag->stop_desc_dma;

	memcpy(dev->dev_addr, pdata->mac_addr, ETH_ALEN);

	netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	ag71xx_dump_regs(ag);

	ag71xx_hw_init(ag);

	ag71xx_dump_regs(ag);

	err = ag71xx_phy_connect(ag);
	if (err)
		goto err_free_desc;

	err = ag71xx_debugfs_init(ag);
	if (err)
		goto err_phy_disconnect;

	platform_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "unable to register net device\n");
		goto err_debugfs_exit;
	}

	pr_info("%s: Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
		dev->name, dev->base_addr, dev->irq,
		ag71xx_get_phy_if_mode_name(pdata->phy_if_mode));

	return 0;

err_debugfs_exit:
	ag71xx_debugfs_exit(ag);
err_phy_disconnect:
	ag71xx_phy_disconnect(ag);
err_free_desc:
	dma_free_coherent(NULL, sizeof(struct ag71xx_desc), ag->stop_desc,
			  ag->stop_desc_dma);
err_free_irq:
	free_irq(dev->irq, dev);
err_unmap_base:
	iounmap(ag->mac_base);
err_free_dev:
	kfree(dev);
err_out:
	platform_set_drvdata(pdev, NULL);
	return err;
}

static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	if (dev) {
		struct ag71xx *ag = netdev_priv(dev);

		ag71xx_debugfs_exit(ag);
		ag71xx_phy_disconnect(ag);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		iounmap(ag->mac_base);
		kfree(dev);
		platform_set_drvdata(pdev, NULL);
	}

	return 0;
}

static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name	= AG71XX_DRV_NAME,
	}
};

static int __init ag71xx_module_init(void)
{
	int ret;

	ret = ag71xx_debugfs_root_init();
	if (ret)
		goto err_out;

	ret = ag71xx_mdio_driver_init();
	if (ret)
		goto err_debugfs_exit;

	ret = platform_driver_register(&ag71xx_driver);
	if (ret)
		goto err_mdio_exit;

	return 0;

err_mdio_exit:
	ag71xx_mdio_driver_exit();
err_debugfs_exit:
	ag71xx_debugfs_root_exit();
err_out:
	return ret;
}

static void __exit ag71xx_module_exit(void)
{
	platform_driver_unregister(&ag71xx_driver);
	ag71xx_mdio_driver_exit();
	ag71xx_debugfs_root_exit();
}

module_init(ag71xx_module_init);
module_exit(ag71xx_module_exit);

MODULE_VERSION(AG71XX_DRV_VERSION);
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" AG71XX_DRV_NAME);