/*
 * Copyright (C) 2006, 2007 Eugene Konev
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <asm/gpio.h>
#include <asm/atomic.h>

MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cpmac");

static int debug_level = 8;
static int dumb_switch;

/* Next 2 are only used in cpmac_probe, so it's pointless to change them */
module_param(debug_level, int, 0444);
module_param(dumb_switch, int, 0444);

MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");

#define CPMAC_VERSION "0.5.2"
/* frame size + 802.1q tag + FCS size */
#define CPMAC_SKB_SIZE		(ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
#define CPMAC_QUEUES	8

/* Ethernet registers */
#define CPMAC_TX_CONTROL		0x0004
#define CPMAC_TX_TEARDOWN		0x0008
#define CPMAC_RX_CONTROL		0x0014
#define CPMAC_RX_TEARDOWN		0x0018
#define CPMAC_MBP			0x0100
# define MBP_RXPASSCRC			0x40000000
# define MBP_RXQOS			0x20000000
# define MBP_RXNOCHAIN			0x10000000
# define MBP_RXCMF			0x01000000
# define MBP_RXSHORT			0x00800000
# define MBP_RXCEF			0x00400000
# define MBP_RXPROMISC			0x00200000
# define MBP_PROMISCCHAN(channel)	(((channel) & 0x7) << 16)
# define MBP_RXBCAST			0x00002000
# define MBP_BCASTCHAN(channel)		(((channel) & 0x7) << 8)
# define MBP_RXMCAST			0x00000020
# define MBP_MCASTCHAN(channel)		((channel) & 0x7)
#define CPMAC_UNICAST_ENABLE		0x0104
#define CPMAC_UNICAST_CLEAR		0x0108
#define CPMAC_MAX_LENGTH		0x010c
#define CPMAC_BUFFER_OFFSET		0x0110
#define CPMAC_MAC_CONTROL		0x0160
# define MAC_TXPTYPE			0x00000200
# define MAC_TXPACE			0x00000040
# define MAC_MII			0x00000020
# define MAC_TXFLOW			0x00000010
# define MAC_RXFLOW			0x00000008
# define MAC_MTEST			0x00000004
# define MAC_LOOPBACK			0x00000002
# define MAC_FDX			0x00000001
#define CPMAC_MAC_STATUS		0x0164
# define MAC_STATUS_QOS			0x00000004
# define MAC_STATUS_RXFLOW		0x00000002
# define MAC_STATUS_TXFLOW		0x00000001
#define CPMAC_TX_INT_ENABLE		0x0178
#define CPMAC_TX_INT_CLEAR		0x017c
#define CPMAC_MAC_INT_VECTOR		0x0180
# define MAC_INT_STATUS			0x00080000
# define MAC_INT_HOST			0x00040000
# define MAC_INT_RX			0x00020000
# define MAC_INT_TX			0x00010000
#define CPMAC_MAC_EOI_VECTOR		0x0184
#define CPMAC_RX_INT_ENABLE		0x0198
#define CPMAC_RX_INT_CLEAR		0x019c
#define CPMAC_MAC_INT_ENABLE		0x01a8
#define CPMAC_MAC_INT_CLEAR		0x01ac
#define CPMAC_MAC_ADDR_LO(channel)	(0x01b0 + (channel) * 4)
#define CPMAC_MAC_ADDR_MID		0x01d0
#define CPMAC_MAC_ADDR_HI		0x01d4
#define CPMAC_MAC_HASH_LO		0x01d8
#define CPMAC_MAC_HASH_HI		0x01dc
#define CPMAC_TX_PTR(channel)		(0x0600 + (channel) * 4)
#define CPMAC_RX_PTR(channel)		(0x0620 + (channel) * 4)
#define CPMAC_TX_ACK(channel)		(0x0640 + (channel) * 4)
#define CPMAC_RX_ACK(channel)		(0x0660 + (channel) * 4)
#define CPMAC_REG_END			0x0680
/*
 * Rx/Tx statistics
 * TODO: use some of them to fill stats in cpmac_stats()
 */
#define CPMAC_STATS_RX_GOOD		0x0200
#define CPMAC_STATS_RX_BCAST		0x0204
#define CPMAC_STATS_RX_MCAST		0x0208
#define CPMAC_STATS_RX_PAUSE		0x020c
#define CPMAC_STATS_RX_CRC		0x0210
#define CPMAC_STATS_RX_ALIGN		0x0214
#define CPMAC_STATS_RX_OVER		0x0218
#define CPMAC_STATS_RX_JABBER		0x021c
#define CPMAC_STATS_RX_UNDER		0x0220
#define CPMAC_STATS_RX_FRAG		0x0224
#define CPMAC_STATS_RX_FILTER		0x0228
#define CPMAC_STATS_RX_QOSFILTER	0x022c
#define CPMAC_STATS_RX_OCTETS		0x0230

#define CPMAC_STATS_TX_GOOD		0x0234
#define CPMAC_STATS_TX_BCAST		0x0238
#define CPMAC_STATS_TX_MCAST		0x023c
#define CPMAC_STATS_TX_PAUSE		0x0240
#define CPMAC_STATS_TX_DEFER		0x0244
#define CPMAC_STATS_TX_COLLISION	0x0248
#define CPMAC_STATS_TX_SINGLECOLL	0x024c
#define CPMAC_STATS_TX_MULTICOLL	0x0250
#define CPMAC_STATS_TX_EXCESSCOLL	0x0254
#define CPMAC_STATS_TX_LATECOLL		0x0258
#define CPMAC_STATS_TX_UNDERRUN		0x025c
#define CPMAC_STATS_TX_CARRIERSENSE	0x0260
#define CPMAC_STATS_TX_OCTETS		0x0264

#define cpmac_read(base, reg)		(readl((void __iomem *)(base) + (reg)))
#define cpmac_write(base, reg, val)	(writel(val, (void __iomem *)(base) + \
						(reg)))

/* MDIO bus */
#define CPMAC_MDIO_VERSION		0x0000
#define CPMAC_MDIO_CONTROL		0x0004
# define MDIOC_IDLE			0x80000000
# define MDIOC_ENABLE			0x40000000
# define MDIOC_PREAMBLE			0x00100000
# define MDIOC_FAULT			0x00080000
# define MDIOC_FAULTDETECT		0x00040000
# define MDIOC_INTTEST			0x00020000
# define MDIOC_CLKDIV(div)		((div) & 0xff)
#define CPMAC_MDIO_ALIVE		0x0008
#define CPMAC_MDIO_LINK			0x000c
#define CPMAC_MDIO_ACCESS(channel)	(0x0080 + (channel) * 8)
# define MDIO_BUSY			0x80000000
# define MDIO_WRITE			0x40000000
# define MDIO_REG(reg)			(((reg) & 0x1f) << 21)
# define MDIO_PHY(phy)			(((phy) & 0x1f) << 16)
# define MDIO_DATA(data)		((data) & 0xffff)
#define CPMAC_MDIO_PHYSEL(channel)	(0x0084 + (channel) * 8)
# define PHYSEL_LINKSEL			0x00000040
# define PHYSEL_LINKINT			0x00000020

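/*
 * The first four 32-bit words of this structure (hw_next, hw_data and the
 * two length/flags word pairs) appear to mirror the in-memory descriptor
 * format the CPMAC DMA engine walks: hw_next/mapping values are what get
 * written to the channel pointer registers. The remaining fields are
 * host-side bookkeeping only and are never read by the hardware.
 */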
struct cpmac_desc {
	u32 hw_next;
	u32 hw_data;
	u16 buflen;
	u16 bufflags;
	u16 datalen;
	u16 dataflags;
#define CPMAC_SOP			0x8000
#define CPMAC_EOP			0x4000
#define CPMAC_OWN			0x2000
#define CPMAC_EOQ			0x1000
	struct sk_buff *skb;
	struct cpmac_desc *next;
	struct cpmac_desc *prev;
	dma_addr_t mapping;
	dma_addr_t data_mapping;
};

struct cpmac_priv {
	spinlock_t lock;
	spinlock_t rx_lock;
	struct cpmac_desc *rx_head;
	int ring_size;
	struct cpmac_desc *desc_ring;
	dma_addr_t dma_ring;
	void __iomem *regs;
	struct mii_bus *mii_bus;
	struct phy_device *phy;
	char phy_name[MII_BUS_ID_SIZE + 3];
	int oldlink, oldspeed, oldduplex;
	u32 msg_enable;
	struct net_device *dev;
	struct work_struct reset_work;
	struct platform_device *pdev;
	struct napi_struct napi;
	atomic_t reset_pending;
};

static irqreturn_t cpmac_irq(int, void *);
static void cpmac_hw_start(struct net_device *dev);
static void cpmac_hw_stop(struct net_device *dev);
static int cpmac_stop(struct net_device *dev);
static int cpmac_open(struct net_device *dev);

static void cpmac_dump_regs(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	for (i = 0; i < CPMAC_REG_END; i += 4) {
		if (i % 16 == 0) {
			if (i)
				printk("\n");
			printk(KERN_DEBUG "%s: reg[%p]:", dev->name,
			       priv->regs + i);
		}
		printk(" %08x", cpmac_read(priv->regs, i));
	}
	printk("\n");
}

static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
{
	int i;
	printk(KERN_DEBUG "%s: desc[%p]:", dev->name, desc);
	for (i = 0; i < sizeof(*desc) / 4; i++)
		printk(" %08x", ((u32 *)desc)[i]);
	printk("\n");
}

static void cpmac_dump_all_desc(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	struct cpmac_desc *dump = priv->rx_head;
	do {
		cpmac_dump_desc(dev, dump);
		dump = dump->next;
	} while (dump != priv->rx_head);
}

static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
{
	int i;
	printk(KERN_DEBUG "%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
	for (i = 0; i < skb->len; i++) {
		if (i % 16 == 0) {
			if (i)
				printk("\n");
			printk(KERN_DEBUG "%s: data[%p]:", dev->name,
			       skb->data + i);
		}
		printk(" %02x", ((u8 *)skb->data)[i]);
	}
	printk("\n");
}

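/*
 * MDIO bus accessors: spin until the MDIO state machine reports idle, issue
 * the transaction through the channel 0 ACCESS register and, for reads,
 * return the 16-bit data word.
 */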
static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	u32 val;

	while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
		cpu_relax();
	cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
		    MDIO_PHY(phy_id));
	while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
		cpu_relax();
	return MDIO_DATA(val);
}

static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
			    int reg, u16 val)
{
	while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
		cpu_relax();
	cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
		    MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));
	return 0;
}

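/*
 * Reset the MDIO state machine and program its clock divider from the
 * "cpmac" clock; the divider is chosen so that the resulting MDIO bus
 * clock comes out at roughly 2.2 MHz.
 */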
static int cpmac_mdio_reset(struct mii_bus *bus)
{
	struct clk *cpmac_clk;

	cpmac_clk = clk_get(&bus->dev, "cpmac");
	if (IS_ERR(cpmac_clk)) {
		printk(KERN_ERR "unable to get cpmac clock\n");
		return -1;
	}
	ar7_device_reset(AR7_RESET_BIT_MDIO);
	cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
		    MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));
	return 0;
}

static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };

static struct mii_bus *cpmac_mii;

static int cpmac_config(struct net_device *dev, struct ifmap *map)
{
	if (dev->flags & IFF_UP)
		return -EBUSY;

	/* Don't allow changing the I/O address */
	if (map->base_addr != dev->base_addr)
		return -EOPNOTSUPP;

	/* ignore other fields */
	return 0;
}

static void cpmac_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u8 tmp;
	u32 mbp, bit, hash[2] = { 0, };
	struct cpmac_priv *priv = netdev_priv(dev);

	mbp = cpmac_read(priv->regs, CPMAC_MBP);
	if (dev->flags & IFF_PROMISC) {
		cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
			    MBP_RXPROMISC);
	} else {
		cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
		if (dev->flags & IFF_ALLMULTI) {
			/* enable all multicast mode */
			cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
			cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
		} else {
			/*
			 * cpmac uses some strange mac address hashing
			 * (not crc32)
			 */
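			/*
			 * Each of the six address bytes is XOR-folded
			 * down to a 6-bit index, which selects one bit
			 * in the 64-bit HASH_LO/HASH_HI register pair.
			 */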
			netdev_for_each_mc_addr(ha, dev) {
				bit = 0;
				tmp = ha->addr[0];
				bit  ^= (tmp >> 2) ^ (tmp << 4);
				tmp = ha->addr[1];
				bit  ^= (tmp >> 4) ^ (tmp << 2);
				tmp = ha->addr[2];
				bit  ^= (tmp >> 6) ^ tmp;
				tmp = ha->addr[3];
				bit  ^= (tmp >> 2) ^ (tmp << 4);
				tmp = ha->addr[4];
				bit  ^= (tmp >> 4) ^ (tmp << 2);
				tmp = ha->addr[5];
				bit  ^= (tmp >> 6) ^ tmp;
				bit &= 0x3f;
				hash[bit / 32] |= 1 << (bit % 32);
			}

			cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
			cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
		}
	}
}

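/*
 * Receive one packet: ack the descriptor, hand the filled skb up the stack
 * and immediately replace it with a freshly allocated one so the descriptor
 * can be returned to the hardware. If the allocation fails, the packet is
 * dropped and the old skb is reused.
 */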
static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
				    struct cpmac_desc *desc)
{
	struct sk_buff *skb, *result = NULL;

	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(priv->dev, desc);
	cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
	if (unlikely(!desc->datalen)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: spurious interrupt\n",
			       priv->dev->name);
		return NULL;
	}

	skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
	if (likely(skb)) {
		skb_put(desc->skb, desc->datalen);
		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
		desc->skb->ip_summed = CHECKSUM_NONE;
		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += desc->datalen;
		result = desc->skb;
		dma_unmap_single(&priv->dev->dev, desc->data_mapping,
				 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		if (unlikely(netif_msg_pktdata(priv))) {
			printk(KERN_DEBUG "%s: received packet:\n",
			       priv->dev->name);
			cpmac_dump_skb(priv->dev, result);
		}
	} else {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING
			       "%s: low on skbs, dropping packet\n",
			       priv->dev->name);
		priv->dev->stats.rx_dropped++;
	}

	desc->buflen = CPMAC_SKB_SIZE;
	desc->dataflags = CPMAC_OWN;

	return result;
}

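/*
 * NAPI poll: walk the rx ring until we hit a descriptor the hardware still
 * owns or the budget is exhausted. If the receiver stopped on an EOQ
 * descriptor, restart it once buffers have been replenished; inconsistent
 * EOQ state is treated as fatal and triggers a full hardware reset.
 */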
static int cpmac_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	struct cpmac_desc *desc, *restart;
	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
	int received = 0, processed = 0;

	spin_lock(&priv->rx_lock);
	if (unlikely(!priv->rx_head)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: polling, but no queue\n",
			       priv->dev->name);
		spin_unlock(&priv->rx_lock);
		napi_complete(napi);
		return 0;
	}

	desc = priv->rx_head;
	restart = NULL;
	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
		processed++;

		if ((desc->dataflags & CPMAC_EOQ) != 0) {
			/* The last update to eoq->hw_next didn't happen
			 * soon enough, and the receiver stopped here.
			 * Remember this descriptor so we can restart
			 * the receiver after freeing some space.
			 */
			if (unlikely(restart)) {
				if (netif_msg_rx_err(priv))
					printk(KERN_ERR "%s: poll found a"
						" duplicate EOQ: %p and %p\n",
						priv->dev->name, restart, desc);
				goto fatal_error;
			}

			restart = desc->next;
		}

		skb = cpmac_rx_one(priv, desc);
		if (likely(skb)) {
			netif_receive_skb(skb);
			received++;
		}
		desc = desc->next;
	}

	if (desc != priv->rx_head) {
		/* We freed some buffers, but not the whole ring,
		 * add what we did free to the rx list */
		desc->prev->hw_next = (u32)0;
		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
	}

	/* Optimization: If we did not actually process an EOQ (perhaps because
	 * of quota limits), check to see if the tail of the queue has EOQ set.
	 * We should immediately restart in that case so that the receiver can
	 * restart and run in parallel with more packet processing.
	 * This lets us handle slightly larger bursts before running
	 * out of ring space (assuming dev->weight < ring_size) */

	if (!restart &&
	     (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
		    == CPMAC_EOQ &&
	     (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
		/* reset EOQ so the poll loop (above) doesn't try to
		 * restart this when it eventually gets to this descriptor.
		 */
		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
		restart = priv->rx_head;
	}

	if (restart) {
		priv->dev->stats.rx_errors++;
		priv->dev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx dma ring overrun\n",
			       priv->dev->name);

		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
			if (netif_msg_drv(priv))
				printk(KERN_ERR "%s: cpmac_poll is trying to "
					"restart rx from a descriptor that's "
					"not free: %p\n",
					priv->dev->name, restart);
			goto fatal_error;
		}

		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
	}

	priv->rx_head = desc;
	spin_unlock(&priv->rx_lock);
	if (unlikely(netif_msg_rx_status(priv)))
		printk(KERN_DEBUG "%s: poll processed %d packets\n",
		       priv->dev->name, received);
	if (processed == 0) {
		/* we ran out of packets to read,
		 * revert to interrupt-driven mode */
		napi_complete(napi);
		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
		return 0;
	}

	return 1;

fatal_error:
	/* Something went horribly wrong.
	 * Reset hardware to try to recover rather than wedging. */

	if (netif_msg_drv(priv)) {
		printk(KERN_ERR "%s: cpmac_poll is confused. "
				"Resetting hardware\n", priv->dev->name);
		cpmac_dump_all_desc(priv->dev);
		printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
			priv->dev->name,
			cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
			cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
	}

	spin_unlock(&priv->rx_lock);
	napi_complete(napi);
	netif_tx_stop_all_queues(priv->dev);
	napi_disable(&priv->napi);

	atomic_inc(&priv->reset_pending);
	cpmac_hw_stop(priv->dev);
	if (!schedule_work(&priv->reset_work))
		atomic_dec(&priv->reset_pending);
	return 0;
}

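/*
 * Transmit path: one descriptor per tx queue. The subqueue is stopped
 * before the descriptor is handed to the hardware and woken again from
 * cpmac_end_xmit() once the ack interrupt arrives.
 */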
static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int queue, len;
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	if (unlikely(atomic_read(&priv->reset_pending)))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_padto(skb, ETH_ZLEN)))
		return NETDEV_TX_OK;

	len = max(skb->len, ETH_ZLEN);
	queue = skb_get_queue_mapping(skb);
	netif_stop_subqueue(dev, queue);

	desc = &priv->desc_ring[queue];
	if (unlikely(desc->dataflags & CPMAC_OWN)) {
		if (netif_msg_tx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: tx dma ring full\n",
			       dev->name);
		return NETDEV_TX_BUSY;
	}

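	/*
	 * This empty lock/unlock pair looks like a leftover (dev->trans_start
	 * used to be updated under this lock); it is left in place since it
	 * also orders this path against the stats updates taken under the
	 * same lock in cpmac_end_xmit().
	 */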
	spin_lock(&priv->lock);
	spin_unlock(&priv->lock);
	desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
	desc->skb = skb;
	desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
					    DMA_TO_DEVICE);
	desc->hw_data = (u32)desc->data_mapping;
	desc->datalen = len;
	desc->buflen = len;
	if (unlikely(netif_msg_tx_queued(priv)))
		printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
		       skb->len);
	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(dev, desc);
	if (unlikely(netif_msg_pktdata(priv)))
		cpmac_dump_skb(dev, skb);
	cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

	return NETDEV_TX_OK;
}

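/*
 * Tx completion: ack the descriptor, update stats, unmap and free the skb,
 * then wake the matching subqueue.
 */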
static void cpmac_end_xmit(struct net_device *dev, int queue)
{
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	desc = &priv->desc_ring[queue];
	cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
	if (likely(desc->skb)) {
		spin_lock(&priv->lock);
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += desc->skb->len;
		spin_unlock(&priv->lock);
		dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
				 DMA_TO_DEVICE);

		if (unlikely(netif_msg_tx_done(priv)))
			printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name,
			       desc->skb, desc->skb->len);

		dev_kfree_skb_irq(desc->skb);
		desc->skb = NULL;
		if (__netif_subqueue_stopped(dev, queue))
			netif_wake_subqueue(dev, queue);
	} else {
		if (netif_msg_tx_err(priv) && net_ratelimit())
			printk(KERN_WARNING
			       "%s: end_xmit: spurious interrupt\n", dev->name);
		if (__netif_subqueue_stopped(dev, queue))
			netif_wake_subqueue(dev, queue);
	}
}

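/*
 * Quiesce the MAC: reset the device, disable rx/tx DMA, clear all channel
 * pointers and pending interrupts, and drop the MII enable bit.
 */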
static void cpmac_hw_stop(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;

	ar7_device_reset(pdata->reset_bit);
	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
	cpmac_write(priv->regs, CPMAC_TX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
	for (i = 0; i < 8; i++) {
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
	}
	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
		    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
}

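/*
 * Bring the MAC up: reset it, point rx channel 0 at the descriptor ring,
 * program the station address and frame limits, enable the interrupts we
 * care about, and finally switch on rx/tx DMA and the MII interface.
 */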
static void cpmac_hw_start(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;

	ar7_device_reset(pdata->reset_bit);
	for (i = 0; i < 8; i++) {
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
	}
	cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);

	cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
		    MBP_RXMCAST);
	cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
	for (i = 0; i < 8; i++)
		cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
	cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
	cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
		    (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
		    (dev->dev_addr[3] << 24));
	cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
	cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
	cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);

	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
	cpmac_write(priv->regs, CPMAC_TX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
	cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
		    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
		    MAC_FDX);
}

static void cpmac_clear_rx(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	struct cpmac_desc *desc;
	int i;
	if (unlikely(!priv->rx_head))
		return;
	desc = priv->rx_head;
	for (i = 0; i < priv->ring_size; i++) {
		if ((desc->dataflags & CPMAC_OWN) == 0) {
			if (netif_msg_rx_err(priv) && net_ratelimit())
				printk(KERN_WARNING "%s: packet dropped\n",
				       dev->name);
			if (unlikely(netif_msg_hw(priv)))
				cpmac_dump_desc(dev, desc);
			desc->dataflags = CPMAC_OWN;
			dev->stats.rx_dropped++;
		}
		desc->hw_next = desc->next->mapping;
		desc = desc->next;
	}
	priv->rx_head->prev->hw_next = 0;
}

static void cpmac_clear_tx(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	int i;
	if (unlikely(!priv->desc_ring))
		return;
	for (i = 0; i < CPMAC_QUEUES; i++) {
		priv->desc_ring[i].dataflags = 0;
		if (priv->desc_ring[i].skb) {
			dev_kfree_skb_any(priv->desc_ring[i].skb);
			priv->desc_ring[i].skb = NULL;
		}
	}
}

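/*
 * Deferred error recovery, scheduled from the poll loop or the interrupt
 * path: flush both rings, restart the hardware and wake the tx queues.
 */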
static void cpmac_hw_error(struct work_struct *work)
{
	struct cpmac_priv *priv =
		container_of(work, struct cpmac_priv, reset_work);

	spin_lock(&priv->rx_lock);
	cpmac_clear_rx(priv->dev);
	spin_unlock(&priv->rx_lock);
	cpmac_clear_tx(priv->dev);
	cpmac_hw_start(priv->dev);
	barrier();
	atomic_dec(&priv->reset_pending);

	netif_tx_wake_all_queues(priv->dev);
	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
}

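/*
 * Decode MAC_STATUS after a host error interrupt. The rx/tx error code
 * fields are undocumented, so they are only logged before the hardware is
 * reset through the reset_work path.
 */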
static void cpmac_check_status(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
	int rx_channel = (macstatus >> 8) & 7;
	int rx_code = (macstatus >> 12) & 15;
	int tx_channel = (macstatus >> 16) & 7;
	int tx_code = (macstatus >> 20) & 15;

	if (rx_code || tx_code) {
		if (netif_msg_drv(priv) && net_ratelimit()) {
			/* Can't find any documentation on what these
			 * error codes actually are, so just log them and hope.
			 */
			if (rx_code)
				printk(KERN_WARNING "%s: host error %d on rx "
				     "channel %d (macstatus %08x), resetting\n",
				     dev->name, rx_code, rx_channel, macstatus);
			if (tx_code)
				printk(KERN_WARNING "%s: host error %d on tx "
				     "channel %d (macstatus %08x), resetting\n",
				     dev->name, tx_code, tx_channel, macstatus);
		}

		netif_tx_stop_all_queues(dev);
		cpmac_hw_stop(dev);
		if (schedule_work(&priv->reset_work))
			atomic_inc(&priv->reset_pending);
		if (unlikely(netif_msg_hw(priv)))
			cpmac_dump_regs(dev);
	}
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
}

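/*
 * Interrupt handler: tx acks are completed inline, rx work is deferred to
 * NAPI, and host/status errors are routed to cpmac_check_status().
 */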
static irqreturn_t cpmac_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cpmac_priv *priv;
	int queue;
	u32 status;

	priv = netdev_priv(dev);

	status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);

	if (unlikely(netif_msg_intr(priv)))
		printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name,
		       status);

	if (status & MAC_INT_TX)
		cpmac_end_xmit(dev, (status & 7));

	if (status & MAC_INT_RX) {
		queue = (status >> 8) & 7;
		if (napi_schedule_prep(&priv->napi)) {
			cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
			__napi_schedule(&priv->napi);
		}
	}

	cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);

	if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
		cpmac_check_status(dev);

	return IRQ_HANDLED;
}

static void cpmac_tx_timeout(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	dev->stats.tx_errors++;
	spin_unlock(&priv->lock);
	if (netif_msg_tx_err(priv) && net_ratelimit())
		printk(KERN_WARNING "%s: transmit timeout\n", dev->name);

	atomic_inc(&priv->reset_pending);
	barrier();
	cpmac_clear_tx(dev);
	barrier();
	atomic_dec(&priv->reset_pending);

	netif_tx_wake_all_queues(priv->dev);
}

static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	if (!netif_running(dev))
		return -EINVAL;
	if (!priv->phy)
		return -EINVAL;

	return phy_mii_ioctl(priv->phy, ifr, cmd);
}

static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (priv->phy)
		return phy_ethtool_gset(priv->phy, cmd);

	return -EINVAL;
}

static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (priv->phy)
		return phy_ethtool_sset(priv->phy, cmd);

	return -EINVAL;
}

static void cpmac_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ring)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	ring->rx_max_pending = 1024;
	ring->rx_mini_max_pending = 1;
	ring->rx_jumbo_max_pending = 1;
	ring->tx_max_pending = 1;

	ring->rx_pending = priv->ring_size;
	ring->rx_mini_pending = 1;
	ring->rx_jumbo_pending = 1;
	ring->tx_pending = 1;
}

static int cpmac_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ring)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (netif_running(dev))
		return -EBUSY;
	priv->ring_size = ring->rx_pending;
	return 0;
}

static void cpmac_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	strcpy(info->driver, "cpmac");
	strcpy(info->version, CPMAC_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "%s", "cpmac");
	info->regdump_len = 0;
}

static const struct ethtool_ops cpmac_ethtool_ops = {
	.get_settings = cpmac_get_settings,
	.set_settings = cpmac_set_settings,
	.get_drvinfo = cpmac_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = cpmac_get_ringparam,
	.set_ringparam = cpmac_set_ringparam,
};

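/*
 * PHY link-change callback: track link/speed/duplex transitions and print
 * the new state only when something actually changed.
 */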
static void cpmac_adjust_link(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	int new_state = 0;

	spin_lock(&priv->lock);
	if (priv->phy->link) {
		netif_tx_start_all_queues(dev);
		if (priv->phy->duplex != priv->oldduplex) {
			new_state = 1;
			priv->oldduplex = priv->phy->duplex;
		}

		if (priv->phy->speed != priv->oldspeed) {
			new_state = 1;
			priv->oldspeed = priv->phy->speed;
		}

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv) && net_ratelimit())
		phy_print_status(priv->phy);

	spin_unlock(&priv->lock);
}

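/*
 * ndo_open: map the register window, allocate the descriptor ring
 * (CPMAC_QUEUES tx descriptors followed by ring_size rx descriptors),
 * pre-fill the rx ring with skbs, request the irq, then start the
 * hardware and the PHY.
 */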
static int cpmac_open(struct net_device *dev)
{
	int i, size, res;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct resource *mem;
	struct cpmac_desc *desc;
	struct sk_buff *skb;

	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
	if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: failed to request registers\n",
			       dev->name);
		res = -ENXIO;
		goto fail_reserve;
	}

	priv->regs = ioremap(mem->start, resource_size(mem));
	if (!priv->regs) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: failed to remap registers\n",
			       dev->name);
		res = -ENXIO;
		goto fail_remap;
	}

	size = priv->ring_size + CPMAC_QUEUES;
	priv->desc_ring = dma_alloc_coherent(&dev->dev,
					     sizeof(struct cpmac_desc) * size,
					     &priv->dma_ring,
					     GFP_KERNEL);
	if (!priv->desc_ring) {
		res = -ENOMEM;
		goto fail_alloc;
	}

	for (i = 0; i < size; i++)
		priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;

	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
	for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
		skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
		if (unlikely(!skb)) {
			res = -ENOMEM;
			goto fail_desc;
		}
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		desc->buflen = CPMAC_SKB_SIZE;
		desc->dataflags = CPMAC_OWN;
		desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
		desc->next->prev = desc;
		desc->hw_next = (u32)desc->next->mapping;
	}

	priv->rx_head->prev->hw_next = (u32)0;

	if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED,
			       dev->name, dev))) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: failed to obtain irq\n",
			       dev->name);
		goto fail_irq;
	}

	atomic_set(&priv->reset_pending, 0);
	INIT_WORK(&priv->reset_work, cpmac_hw_error);
	cpmac_hw_start(dev);

	napi_enable(&priv->napi);
	priv->phy->state = PHY_CHANGELINK;
	phy_start(priv->phy);

	return 0;

fail_irq:
fail_desc:
	for (i = 0; i < priv->ring_size; i++) {
		if (priv->rx_head[i].skb) {
			dma_unmap_single(&dev->dev,
					 priv->rx_head[i].data_mapping,
					 CPMAC_SKB_SIZE,
					 DMA_FROM_DEVICE);
			kfree_skb(priv->rx_head[i].skb);
		}
	}
	/* the ring came from dma_alloc_coherent, so free it the same way */
	dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size,
			  priv->desc_ring, priv->dma_ring);

fail_alloc:
	iounmap(priv->regs);

fail_remap:
	release_mem_region(mem->start, resource_size(mem));

fail_reserve:
	return res;
}

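/*
 * ndo_stop: tear everything down in roughly the reverse order of
 * cpmac_open(); outstanding rx skbs are unmapped and freed before the
 * coherent descriptor ring is released.
 */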
static int cpmac_stop(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct resource *mem;

	netif_tx_stop_all_queues(dev);

	cancel_work_sync(&priv->reset_work);
	napi_disable(&priv->napi);
	phy_stop(priv->phy);

	cpmac_hw_stop(dev);

	for (i = 0; i < 8; i++)
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
	cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
	cpmac_write(priv->regs, CPMAC_MBP, 0);

	free_irq(dev->irq, dev);
	iounmap(priv->regs);
	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
	release_mem_region(mem->start, resource_size(mem));
	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
	for (i = 0; i < priv->ring_size; i++) {
		if (priv->rx_head[i].skb) {
			dma_unmap_single(&dev->dev,
					 priv->rx_head[i].data_mapping,
					 CPMAC_SKB_SIZE,
					 DMA_FROM_DEVICE);
			kfree_skb(priv->rx_head[i].skb);
		}
	}

	dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
			  (CPMAC_QUEUES + priv->ring_size),
			  priv->desc_ring, priv->dma_ring);
	return 0;
}

static const struct net_device_ops cpmac_netdev_ops = {
	.ndo_open		= cpmac_open,
	.ndo_stop		= cpmac_stop,
	.ndo_start_xmit		= cpmac_start_xmit,
	.ndo_tx_timeout		= cpmac_tx_timeout,
	.ndo_set_multicast_list	= cpmac_set_multicast_list,
	.ndo_do_ioctl		= cpmac_ioctl,
	.ndo_set_config		= cpmac_config,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

static int external_switch;

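/*
 * Probe: pick a PHY (either one detected in the scanned mask, or the fixed
 * bus when an external or dumb switch is assumed), then allocate and
 * register the multiqueue net_device.
 */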
static int __devinit cpmac_probe(struct platform_device *pdev)
{
	int rc, phy_id;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	struct resource *mem;
	struct cpmac_priv *priv;
	struct net_device *dev;
	struct plat_cpmac_data *pdata;

	pdata = pdev->dev.platform_data;

	if (external_switch || dumb_switch) {
		strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
		phy_id = pdev->id;
	} else {
		for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
			if (!(pdata->phy_mask & (1 << phy_id)))
				continue;
			if (!cpmac_mii->phy_map[phy_id])
				continue;
			strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
			break;
		}
	}

	if (phy_id == PHY_MAX_ADDR) {
		dev_err(&pdev->dev, "no PHY present, falling back to switch on MDIO bus 0\n");
		strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
		phy_id = pdev->id;
	}

	dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);

	if (!dev) {
		printk(KERN_ERR "cpmac: Unable to allocate net_device\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, dev);
	priv = netdev_priv(dev);

	priv->pdev = pdev;
	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!mem) {
		rc = -ENODEV;
		goto fail;
	}

	dev->irq = platform_get_irq_byname(pdev, "irq");

	dev->netdev_ops = &cpmac_netdev_ops;
	dev->ethtool_ops = &cpmac_ethtool_ops;

	netif_napi_add(dev, &priv->napi, cpmac_poll, 64);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->rx_lock);
	priv->dev = dev;
	priv->ring_size = 64;
	priv->msg_enable = netif_msg_init(debug_level, 0xff);
	memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));

	snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT,
		 mdio_bus_id, phy_id);

	priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
						PHY_INTERFACE_MODE_MII);

	if (IS_ERR(priv->phy)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Could not attach to PHY\n",
			       dev->name);
		rc = PTR_ERR(priv->phy);
		goto fail;
	}

	if ((rc = register_netdev(dev))) {
		printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
		       dev->name);
		goto fail;
	}

	if (netif_msg_probe(priv)) {
		printk(KERN_INFO
		       "cpmac: device %s (regs: %p, irq: %d, phy: %s, "
		       "mac: %pM)\n", dev->name, (void *)mem->start, dev->irq,
		       priv->phy_name, dev->dev_addr);
	}
	return 0;

fail:
	free_netdev(dev);
	return rc;
}

static int __devexit cpmac_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	unregister_netdev(dev);
	free_netdev(dev);
	return 0;
}

static struct platform_driver cpmac_driver = {
	.driver.name = "cpmac",
	.driver.owner = THIS_MODULE,
	.probe = cpmac_probe,
	.remove = __devexit_p(cpmac_remove),
};

int __devinit cpmac_init(void)
{
	u32 mask;
	int i, res;

	cpmac_mii = mdiobus_alloc();
	if (cpmac_mii == NULL)
		return -ENOMEM;

	cpmac_mii->name = "cpmac-mii";
	cpmac_mii->read = cpmac_mdio_read;
	cpmac_mii->write = cpmac_mdio_write;
	cpmac_mii->reset = cpmac_mdio_reset;
	cpmac_mii->irq = mii_irqs;

	cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);

	if (!cpmac_mii->priv) {
		printk(KERN_ERR "Can't ioremap mdio registers\n");
		res = -ENXIO;
		goto fail_alloc;
	}

#warning FIXME: unhardcode gpio&reset bits
	ar7_gpio_disable(26);
	ar7_gpio_disable(27);
	ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
	ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
	ar7_device_reset(AR7_RESET_BIT_EPHY);

	cpmac_mii->reset(cpmac_mii);

	for (i = 0; i < 300; i++)
		if ((mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE)))
			break;
		else
			msleep(10);

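	/*
	 * Bit 31 of the ALIVE mask is ignored; if more than one of the
	 * remaining bits is set, several PHY addresses answered, which
	 * means the MAC is wired to a switch rather than a single PHY.
	 */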
	mask &= 0x7fffffff;
	if (mask & (mask - 1)) {
		external_switch = 1;
		mask = 0;
	}

	cpmac_mii->phy_mask = ~(mask | 0x80000000);
	snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "1");

	res = mdiobus_register(cpmac_mii);
	if (res)
		goto fail_mii;

	res = platform_driver_register(&cpmac_driver);
	if (res)
		goto fail_cpmac;

	return 0;

fail_cpmac:
	mdiobus_unregister(cpmac_mii);

fail_mii:
	iounmap(cpmac_mii->priv);

fail_alloc:
	mdiobus_free(cpmac_mii);

	return res;
}

void __devexit cpmac_exit(void)
{
	platform_driver_unregister(&cpmac_driver);
	mdiobus_unregister(cpmac_mii);
	iounmap(cpmac_mii->priv);
	mdiobus_free(cpmac_mii);
}

module_init(cpmac_init);
module_exit(cpmac_exit);