/*
 *  SuperH Ethernet device driver
 *
 *  Copyright (C) 2006-2008 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2009 Renesas Solutions Corp.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 */

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>

#include "sh_eth.h"

/* CPU-dependent code */
#if defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SH_ETH_RESET_DEFAULT	1
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;

	if (mdp->duplex) /* Full */
		ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
	else		/* Half */
		ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;

	switch (mdp->speed) {
	case 10: /* 10BASE */
		ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
		break;
	case 100:/* 100BASE */
		ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
		break;
	default:
		break;
	}
}

/* SH7724 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
#define SH_ETH_RESET_DEFAULT	1
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;

	if (mdp->duplex) /* Full */
		ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
	else		/* Half */
		ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;

	switch (mdp->speed) {
	case 10: /* 10BASE */
		ctrl_outl(0, ioaddr + RTRATE);
		break;
	case 100:/* 100BASE */
		ctrl_outl(1, ioaddr + RTRATE);
		break;
	default:
		break;
	}
}

/* SH7757 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.rmcr_value	= 0x00000001,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_ade		= 1,
};

#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU	1
static void sh_eth_chip_reset(struct net_device *ndev)
{
	/* reset device */
	ctrl_outl(ARSTR_ARSTR, ARSTR);
	mdelay(1);
}

static void sh_eth_reset(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;
	int cnt = 100;

	ctrl_outl(EDSR_ENALL, ioaddr + EDSR);
	ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
	while (cnt > 0) {
		if (!(ctrl_inl(ioaddr + EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt == 0)
		printk(KERN_ERR "Device reset failed\n");

	/* Table Init */
	ctrl_outl(0x0, ioaddr + TDLAR);
	ctrl_outl(0x0, ioaddr + TDFAR);
	ctrl_outl(0x0, ioaddr + TDFXR);
	ctrl_outl(0x0, ioaddr + TDFFR);
	ctrl_outl(0x0, ioaddr + RDLAR);
	ctrl_outl(0x0, ioaddr + RDFAR);
	ctrl_outl(0x0, ioaddr + RDFXR);
	ctrl_outl(0x0, ioaddr + RDFFR);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;

	if (mdp->duplex) /* Full */
		ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
	else		/* Half */
		ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;

	switch (mdp->speed) {
	case 10: /* 10BASE */
		ctrl_outl(GECMR_10, ioaddr + GECMR);
		break;
	case 100:/* 100BASE */
		ctrl_outl(GECMR_100, ioaddr + GECMR);
		break;
	case 1000: /* 1000BASE */
		ctrl_outl(GECMR_1000, ioaddr + GECMR);
		break;
	default:
		break;
	}
}

/* sh7763 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
};

#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
#define SH_ETH_RESET_DEFAULT	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
#define SH_ETH_RESET_DEFAULT	1
#define SH_ETH_HAS_TSU	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
};
#endif
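
/*
 * Each sh_eth_cpu_data entry above spells out only what differs from the
 * common case; any field a CPU section leaves zero is filled in with a
 * default by sh_eth_set_default_cpu_data() below.
 */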

static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->rmcr_value)
		cd->rmcr_value = DEFAULT_RMCR_VALUE;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->tx_error_check)
		cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
}

#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset */
static void sh_eth_reset(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;

	ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
	mdelay(3);
	ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
}
#endif

#if defined(CONFIG_CPU_SH4)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	int reserve;

	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
	if (reserve)
		skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif
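
/*
 * The helpers above push skb->data to a CPU-specific boundary (the
 * alignment constants live in sh_eth.h) before the buffer is handed to
 * the EDMAC for DMA; the SH-4 parts are not DMA-coherent, so aligning
 * the receive buffer keeps DMA traffic and CPU cache lines apart.
 */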

/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}
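
/*
 * The byte order the EDMAC expects in descriptors is configured per
 * platform rather than per CPU, which is why the direction comes from
 * platform data (pd->edmac_endian, copied into mdp in the probe
 * routine) instead of a compile-time endianness test.
 */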

/*
 * Program the hardware MAC address from dev->dev_addr.
 */
static void update_mac_address(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;

	ctrl_outl((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		  (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
		  ioaddr + MAHR);
	ctrl_outl((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
		  ioaddr + MALR);
}
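
/*
 * MAHR holds the first four bytes of the station address and MALR the
 * remaining two, which is why the six dev_addr bytes are split 4/2
 * above and reassembled the same way in read_mac_address() below.
 */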

/*
 * Get the MAC address from the SuperH MAC address registers
 *
 * The SuperH Ethernet controller has no ROM for its MAC address.
 * This driver picks up the MAC address that the bootloader (U-Boot or
 * sh-ipl+g) programmed into the controller, so a MAC address must be
 * set in the bootloader before this device is used.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	u32 ioaddr = ndev->base_addr;

	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, 6);
	} else {
		ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24);
		ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF);
		ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF);
	}
}

struct bb_info {
	struct mdiobb_ctrl ctrl;
	u32 addr;
	u32 mmd_msk; /* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};

/* PHY bit set */
static void bb_set(u32 addr, u32 msk)
{
	ctrl_outl(ctrl_inl(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(u32 addr, u32 msk)
{
	ctrl_outl((ctrl_inl(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(u32 addr, u32 msk)
{
	return (ctrl_inl(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};
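
/*
 * These four callbacks are the only hardware-specific part of the MDIO
 * implementation: the generic mdio-bitbang layer drives the MDIO frame
 * protocol in software and calls back here to wiggle individual bits of
 * the PIR register (the bit masks are set up in sh_mdio_init()).
 */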

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < RX_RING_SIZE; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
		}
	}
	kfree(mdp->rx_skbuff);

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < TX_RING_SIZE; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
		}
	}
	kfree(mdp->tx_skbuff);
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
	int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;

	mdp->cur_rx = mdp->cur_tx = 0;
	mdp->dirty_rx = mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = dev_alloc_skb(mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
				DMA_FROM_DEVICE);
		skb->dev = ndev; /* Mark as being used by this device. */
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The buffer size is rounded up to a 16-byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		if (i == 0) {
			ctrl_outl(mdp->rx_desc_dma, ioaddr + RDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
			ctrl_outl(mdp->rx_desc_dma, ioaddr + RDFAR);
#endif
		}
	}

	mdp->dirty_rx = (u32) (i - RX_RING_SIZE);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < TX_RING_SIZE; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			ctrl_outl(mdp->tx_desc_dma, ioaddr + TDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
			ctrl_outl(mdp->tx_desc_dma, ioaddr + TDFAR);
#endif
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}
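
/*
 * Ownership protocol for the rings built above: setting RD_RACT/TD_TACT
 * hands a descriptor to the EDMAC, which clears the bit again once it
 * is done with the buffer; RD_RDEL/TD_TDLE on the last entry tells the
 * EDMAC to wrap back to the first descriptor.
 */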

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/*
	 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
				GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
				GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
			GFP_KERNEL);

	if (!mdp->rx_ring) {
		dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
			rx_ringsize);
		ret = -ENOMEM;
		goto desc_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
			GFP_KERNEL);
	if (!mdp->tx_ring) {
		dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
			tx_ringsize);
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);

	return ret;
}

static int sh_eth_dev_init(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	u_int32_t rx_int_var, tx_int_var;
	u32 val;

	/* Soft Reset */
	sh_eth_reset(ndev);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		ctrl_outl(mdp->cd->rpadir_value, ioaddr + RPADIR);

	/* all sh_eth int mask */
	ctrl_outl(0, ioaddr + EESIPR);

#if defined(__LITTLE_ENDIAN__)
	if (mdp->cd->hw_swap)
		ctrl_outl(EDMR_EL, ioaddr + EDMR);
	else
#endif
		ctrl_outl(0, ioaddr + EDMR);

	/* FIFO size set */
	ctrl_outl(mdp->cd->fdr_value, ioaddr + FDR);
	ctrl_outl(0, ioaddr + TFTR);

	/* Frame recv control */
	ctrl_outl(mdp->cd->rmcr_value, ioaddr + RMCR);

	rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
	tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
	ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);

	if (mdp->cd->bculr)
		ctrl_outl(0x800, ioaddr + BCULR);	/* Burst cycle set */

	ctrl_outl(mdp->cd->fcftr_value, ioaddr + FCFTR);

	if (!mdp->cd->no_trimd)
		ctrl_outl(0, ioaddr + TRIMD);

	/* Recv frame limit set register */
	ctrl_outl(RFLR_VALUE, ioaddr + RFLR);

	ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
	ctrl_outl(mdp->cd->eesipr_value, ioaddr + EESIPR);

	/* PAUSE Prohibition */
	val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	ctrl_outl(val, ioaddr + ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	ctrl_outl(mdp->cd->ecsr_value, ioaddr + ECSR);

	/* E-MAC Interrupt Enable register */
	ctrl_outl(mdp->cd->ecsipr_value, ioaddr + ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		ctrl_outl(APR_AP, ioaddr + APR);
	if (mdp->cd->mpr)
		ctrl_outl(MPR_MP, ioaddr + MPR);
	if (mdp->cd->tpauser)
		ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);

	/* Setting the Rx mode will start the Rx process. */
	ctrl_outl(EDRRR_R, ioaddr + EDRRR);

	netif_start_queue(ndev);

	return ret;
}
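
/*
 * sh_eth_dev_init() is the single full (re)initialisation path: it is
 * called from sh_eth_open() and again from sh_eth_tx_timeout().  The
 * runtime-PM callbacks at the bottom of this file rely on this, which
 * is why they do not save or restore any register state.
 */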

/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int freeNum = 0;
	int entry = 0;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % TX_RING_SIZE;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			freeNum++;
		}
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		if (entry >= TX_RING_SIZE - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		mdp->stats.tx_packets++;
		mdp->stats.tx_bytes += txdesc->buffer_length;
	}
	return freeNum;
}
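
/*
 * sh_eth_txfree() reclaims descriptors whose TD_TACT bit the EDMAC has
 * already cleared and returns how many skbs were freed, so its callers
 * (the interrupt handler and sh_eth_start_xmit()) can tell whether any
 * ring space was opened up.
 */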

/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % RX_RING_SIZE;
	int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status;

	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

		if (--boguscnt < 0)
			break;

		if (!(desc_status & RDFEND))
			mdp->stats.rx_length_errors++;

		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			mdp->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				mdp->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				mdp->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				mdp->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				mdp->stats.rx_over_errors++;
		} else {
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			mdp->stats.rx_packets++;
			mdp->stats.rx_bytes += pkt_len;
		}
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
		/* The buffer size is rounded up to a 16-byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
					DMA_FROM_DEVICE);
			skb->dev = ndev;
			sh_eth_set_receive_align(skb);

			skb->ip_summed = CHECKSUM_NONE;
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		}
		if (entry >= RX_RING_SIZE - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R))
		ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);

	return 0;
}
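
/*
 * The receive path above is interrupt driven rather than NAPI based:
 * completed frames are handed straight to netif_rx(), the ring is
 * refilled in the same pass, and EDRRR is rewritten at the end in case
 * the receiver stopped while no descriptor was available.
 */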

/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = ctrl_inl(ioaddr + ECSR);
		ctrl_outl(felic_stat, ioaddr + ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			mdp->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				if (mdp->link == PHY_DOWN)
					link_stat = 0;
				else
					link_stat = PHY_ST_LINK;
			} else {
				link_stat = (ctrl_inl(ioaddr + PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK)) {
				/* Link Down : disable tx and rx */
				ctrl_outl(ctrl_inl(ioaddr + ECMR) &
					  ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
			} else {
				/* Link Up */
				ctrl_outl(ctrl_inl(ioaddr + EESIPR) &
					  ~DMAC_M_ECI, ioaddr + EESIPR);
				/* clear int */
				ctrl_outl(ctrl_inl(ioaddr + ECSR),
					  ioaddr + ECSR);
				ctrl_outl(ctrl_inl(ioaddr + EESIPR) |
					  DMAC_M_ECI, ioaddr + EESIPR);
				/* enable tx and rx */
				ctrl_outl(ctrl_inl(ioaddr + ECMR) |
					  (ECMR_RE | ECMR_TE), ioaddr + ECMR);
			}
		}
	}

	if (intr_status & EESR_TWB) {
		/* Tx write-back end; an otherwise unused write-back interrupt */
		if (intr_status & EESR_TABT)	/* Transmit Abort int */
			mdp->stats.tx_aborted_errors++;
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			mdp->stats.rx_frame_errors++;
			dev_err(&ndev->dev, "Receive Frame Overflow\n");
		}
	}

	if (!mdp->cd->no_ade) {
		if (intr_status & EESR_ADE && intr_status & EESR_TDE &&
		    intr_status & EESR_TFE)
			mdp->stats.tx_fifo_errors++;
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		mdp->stats.rx_over_errors++;

		if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R)
			ctrl_outl(EDRRR_R, ioaddr + EDRRR);
		dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}
	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		mdp->stats.rx_fifo_errors++;
		dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
		/* dmesg */
		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
				intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
				mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ EDTRR_TRNS) {
			/* tx dma start */
			ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}

static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 ioaddr, intr_status = 0;

	ioaddr = ndev->base_addr;
	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = ctrl_inl(ioaddr + EESR);
	/* Clear interrupt */
	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
			cd->tx_check | cd->eesr_err_check)) {
		ctrl_outl(intr_status, ioaddr + EESR);
		ret = IRQ_HANDLED;
	} else
		goto other_irq;

	if (intr_status & (EESR_FRC | /* Frame recv */
			EESR_RMAF | /* Multicast address recv */
			EESR_RRF  | /* Residual-bit frame recv */
			EESR_RTLF | /* Long frame recv */
			EESR_RTSF | /* Short frame recv */
			EESR_PRE  | /* PHY-LSI recv error */
			EESR_CERF)) { /* recv frame CRC error */
		sh_eth_rx(ndev);
	}

	/* Tx Check */
	if (intr_status & cd->tx_check) {
		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check)
		sh_eth_error(ndev, intr_status);

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
}
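
/*
 * The handler writes the status back to EESR only when a source it
 * recognises (the Rx set, cd->tx_check or cd->eesr_err_check) is
 * pending; otherwise it returns IRQ_NONE without touching the
 * hardware, which matters on the parts that request the IRQ as shared
 * (see sh_eth_open()).
 */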

static void sh_eth_timer(unsigned long data)
{
	struct net_device *ndev = (struct net_device *)data;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	mod_timer(&mdp->timer, jiffies + (10 * HZ));
}

/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	u32 ioaddr = ndev->base_addr;
	int new_state = 0;

	if (phydev->link != PHY_DOWN) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (mdp->link == PHY_DOWN) {
			ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
					| ECMR_DM, ioaddr + ECMR);
			new_state = 1;
			mdp->link = phydev->link;
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = PHY_DOWN;
		mdp->speed = 0;
		mdp->duplex = -1;
	}

	if (new_state)
		phy_print_status(phydev);
}

/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev = NULL;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
		mdp->mii_bus->id, mdp->phy_id);

	mdp->link = PHY_DOWN;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try to connect to the PHY */
	phydev = phy_connect(ndev, phy_id, &sh_eth_adjust_link,
				0, PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev)) {
		dev_err(&ndev->dev, "phy_connect failed\n");
		return PTR_ERR(phydev);
	}

	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
		phydev->addr, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}

/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	/* reset phy - this also wakes it from PDOWN */
	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
	phy_start(mdp->phydev);

	return 0;
}

/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
#if defined(CONFIG_CPU_SUBTYPE_SH7763) || defined(CONFIG_CPU_SUBTYPE_SH7764) || \
	defined(CONFIG_CPU_SUBTYPE_SH7757)
				IRQF_SHARED,
#else
				0,
#endif
				ndev->name, ndev);
	if (ret) {
		dev_err(&ndev->dev, "Can not assign IRQ number\n");
		return ret;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev);
	if (ret)
		goto out_free_irq;

	/* PHY control start */
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	/* Set the timer to check for link beat. */
	init_timer(&mdp->timer);
	mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
	setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
	pm_runtime_put_sync(&mdp->pdev->dev);
	return ret;
}

/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	/* warning message out */
	printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
	       " resetting...\n", ndev->name, (int)ctrl_inl(ioaddr + EESR));

	/* tx_errors count up */
	mdp->stats.tx_errors++;

	/* timer off */
	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = 0;
		rxdesc->addr = 0xBADF00D0;
		if (mdp->rx_skbuff[i])
			dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (mdp->tx_skbuff[i])
			dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev);

	/* timer on */
	mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
	add_timer(&mdp->timer);
}

/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
		if (!sh_eth_txfree(ndev)) {
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % TX_RING_SIZE;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	txdesc->addr = virt_to_phys(skb->data);
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	/* write back */
	__flush_purge_region(skb->data, skb->len);
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	if (entry >= TX_RING_SIZE - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
		ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);

	return NETDEV_TX_OK;
}
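
/*
 * Transmit notes: frames shorter than ETHERSMALL have their descriptor
 * length clamped up to ETHERSMALL, the data region is flushed from the
 * cache before the EDMAC can see it, and EDTRR_TRNS restarts the Tx
 * DMA only when it is not already running.
 */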

/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	int ringsize;

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	ctrl_outl(0x0000, ioaddr + EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	ctrl_outl(0, ioaddr + EDTRR);
	ctrl_outl(0, ioaddr + EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free DMA buffer */
	ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);

	/* free DMA buffer */
	ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return 0;
}

static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;

	pm_runtime_get_sync(&mdp->pdev->dev);

	mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR);
	ctrl_outl(0, ioaddr + TROCR);	/* (write clear) */
	mdp->stats.collisions += ctrl_inl(ioaddr + CDCR);
	ctrl_outl(0, ioaddr + CDCR);	/* (write clear) */
	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR);
	ctrl_outl(0, ioaddr + LCCR);	/* (write clear) */
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CERCR);/* CERCR */
	ctrl_outl(0, ioaddr + CERCR);	/* (write clear) */
	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CEECR);/* CEECR */
	ctrl_outl(0, ioaddr + CEECR);	/* (write clear) */
#else
	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
	ctrl_outl(0, ioaddr + CNDCR);	/* (write clear) */
#endif
	pm_runtime_put_sync(&mdp->pdev->dev);

	return &mdp->stats;
}

/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
				int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

#if defined(SH_ETH_HAS_TSU)
/* Multicast reception directions set */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;

	if (ndev->flags & IFF_PROMISC) {
		/* Set promiscuous. */
		ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
			  ioaddr + ECMR);
	} else {
		/* Normal, unicast/broadcast-only mode. */
		ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
			  ioaddr + ECMR);
	}
}

/* SuperH's TSU register init function */
static void sh_eth_tsu_init(u32 ioaddr)
{
	ctrl_outl(0, ioaddr + TSU_FWEN0);	/* Disable forward(0->1) */
	ctrl_outl(0, ioaddr + TSU_FWEN1);	/* Disable forward(1->0) */
	ctrl_outl(0, ioaddr + TSU_FCM);		/* forward fifo 3k-3k */
	ctrl_outl(0xc, ioaddr + TSU_BSYSL0);
	ctrl_outl(0xc, ioaddr + TSU_BSYSL1);
	ctrl_outl(0, ioaddr + TSU_PRISL0);
	ctrl_outl(0, ioaddr + TSU_PRISL1);
	ctrl_outl(0, ioaddr + TSU_FWSL0);
	ctrl_outl(0, ioaddr + TSU_FWSL1);
	ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
	ctrl_outl(0, ioaddr + TSU_QTAG0);	/* Disable QTAG(0->1) */
	ctrl_outl(0, ioaddr + TSU_QTAG1);	/* Disable QTAG(1->0) */
#else
	ctrl_outl(0, ioaddr + TSU_QTAGM0);	/* Disable QTAG(0->1) */
	ctrl_outl(0, ioaddr + TSU_QTAGM1);	/* Disable QTAG(1->0) */
#endif
	ctrl_outl(0, ioaddr + TSU_FWSR);	/* all interrupt status clear */
	ctrl_outl(0, ioaddr + TSU_FWINMK);	/* Disable all interrupt */
	ctrl_outl(0, ioaddr + TSU_TEN);		/* Disable all CAM entry */
	ctrl_outl(0, ioaddr + TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	ctrl_outl(0, ioaddr + TSU_POST2);	/* Disable CAM entry [ 8-15] */
	ctrl_outl(0, ioaddr + TSU_POST3);	/* Disable CAM entry [16-23] */
	ctrl_outl(0, ioaddr + TSU_POST4);	/* Disable CAM entry [24-31] */
}
#endif /* SH_ETH_HAS_TSU */

/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
{
	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

	/* unregister mdio bus */
	mdiobus_unregister(bus);

	/* remove mdio bus info from net_device */
	dev_set_drvdata(&ndev->dev, NULL);

	/* free interrupts memory */
	kfree(bus->irq);

	/* free bitbang info */
	free_mdio_bitbang(bus);

	return 0;
}

/* MDIO bus init function */
static int sh_mdio_init(struct net_device *ndev, int id)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init */
	bitbang->addr = ndev->base_addr + PIR;
	bitbang->mdi_msk = 0x08;
	bitbang->mdo_msk = 0x04;
	bitbang->mmd_msk = 0x02; /* MMD */
	bitbang->mdc_msk = 0x01;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out_free_bitbang;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = &ndev->dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);

	/* PHY IRQ */
	mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;

	/* register mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_irq;

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_irq:
	kfree(mdp->mii_bus->irq);

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);

out_free_bitbang:
	kfree(bitbang);

out:
	return ret;
}

static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
#if defined(SH_ETH_HAS_TSU)
	.ndo_set_multicast_list	= sh_eth_set_multicast_list,
#endif
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp;
	struct sh_eth_plat_data *pd;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		ret = -EINVAL;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev) {
		dev_err(&pdev->dev, "Could not allocate device.\n");
		ret = -ENOMEM;
		goto out;
	}

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ndev->dma = -1;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	mdp = netdev_priv(ndev);
	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;
	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
	/* get PHY ID */
	mdp->phy_id = pd->phy;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;

	/* set cpu data */
	mdp->cd = &sh_eth_my_cpu_data;
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	ndev->netdev_ops = &sh_eth_netdev_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;

	mdp->post_rx = POST_RX >> (devno << 1);
	mdp->post_fw = POST_FW >> (devno << 1);

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);

	/* First device only init */
	if (!devno) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

#if defined(SH_ETH_HAS_TSU)
		/* TSU init (Init only) */
		sh_eth_tsu_init(SH_TSU_ADDR);
#endif
	}

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_release;

	/* mdio bus init */
	ret = sh_mdio_init(ndev, pdev->id);
	if (ret)
		goto out_unregister;

	/* print device information */
	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
	       (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

out_unregister:
	unregister_netdev(ndev);

out_release:
	/* net_dev free */
	if (ndev)
		free_netdev(ndev);

out:
	return ret;
}
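
/*
 * For reference, a minimal board-file sketch of the platform glue this
 * probe routine expects: one MEM resource for base_addr, one IRQ
 * resource, and an sh_eth_plat_data whose fields match what is read
 * above.  The base address, register window size and IRQ number below
 * are illustrative placeholders, not values from any real board, and
 * the device name must match the driver's CARDNAME:
 *
 *	static struct sh_eth_plat_data sh_eth_pdata = {
 *		.phy		= 1,
 *		.edmac_endian	= EDMAC_LITTLE_ENDIAN,
 *	};
 *
 *	static struct resource sh_eth_resources[] = {
 *		{
 *			.start	= 0xfb000000,
 *			.end	= 0xfb0001ff,
 *			.flags	= IORESOURCE_MEM,
 *		}, {
 *			.start	= 57,
 *			.flags	= IORESOURCE_IRQ,
 *		},
 *	};
 *
 *	static struct platform_device sh_eth_device = {
 *		.name		= CARDNAME,
 *		.id		= 0,
 *		.dev		= { .platform_data = &sh_eth_pdata },
 *		.resource	= sh_eth_resources,
 *		.num_resources	= ARRAY_SIZE(sh_eth_resources),
 *	};
 *
 *	platform_device_register(&sh_eth_device);
 */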

static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	flush_scheduled_work();
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static int sh_eth_runtime_nop(struct device *dev)
{
	/*
	 * Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};

static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.driver = {
		   .name = CARDNAME,
		   .pm = &sh_eth_dev_pm_ops,
	},
};

static int __init sh_eth_init(void)
{
	return platform_driver_register(&sh_eth_driver);
}

static void __exit sh_eth_cleanup(void)
{
	platform_driver_unregister(&sh_eth_driver);
}

module_init(sh_eth_init);
module_exit(sh_eth_cleanup);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");