/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/export.h>
#include <bcm63xx_dev_enet.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>

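/*
 * Tables mapping the abstract ENETDMAC_* register indices to per-SoC
 * offsets: BCM6345 exposes the per-channel DMA registers at different
 * locations than the later (BCM6348-style) chips.
 */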
static const unsigned long bcm6348_regs_enetdmac[] = {
	[ENETDMAC_CHANCFG]	= ENETDMAC_CHANCFG_REG,
	[ENETDMAC_IR]		= ENETDMAC_IR_REG,
	[ENETDMAC_IRMASK]	= ENETDMAC_IRMASK_REG,
	[ENETDMAC_MAXBURST]	= ENETDMAC_MAXBURST_REG,
};

static const unsigned long bcm6345_regs_enetdmac[] = {
	[ENETDMAC_CHANCFG]	= ENETDMA_6345_CHANCFG_REG,
	[ENETDMAC_IR]		= ENETDMA_6345_IR_REG,
	[ENETDMAC_IRMASK]	= ENETDMA_6345_IRMASK_REG,
	[ENETDMAC_MAXBURST]	= ENETDMA_6345_MAXBURST_REG,
	[ENETDMAC_BUFALLOC]	= ENETDMA_6345_BUFALLOC_REG,
	[ENETDMAC_RSTART]	= ENETDMA_6345_RSTART_REG,
	[ENETDMAC_FC]		= ENETDMA_6345_FC_REG,
	[ENETDMAC_LEN]		= ENETDMA_6345_LEN_REG,
};

const unsigned long *bcm63xx_regs_enetdmac;
EXPORT_SYMBOL(bcm63xx_regs_enetdmac);

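/* pick the register layout matching the SoC detected at boot */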
static __init void bcm63xx_enetdmac_regs_init(void)
{
	if (BCMCPU_IS_6345())
		bcm63xx_regs_enetdmac = bcm6345_regs_enetdmac;
	else
		bcm63xx_regs_enetdmac = bcm6348_regs_enetdmac;
}

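/*
 * Memory regions of the DMA engine shared by both MACs and the switch:
 * global DMA config (ENETDMA), per-channel config (ENETDMAC) and
 * per-channel state (ENETDMAS).  Addresses are filled in by
 * register_shared() once the SoC is known.
 */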
static struct resource shared_res[] = {
	{
		.start		= -1, /* filled at runtime */
		.end		= -1, /* filled at runtime */
		.flags		= IORESOURCE_MEM,
	},
	{
		.start		= -1, /* filled at runtime */
		.end		= -1, /* filled at runtime */
		.flags		= IORESOURCE_MEM,
	},
	{
		.start		= -1, /* filled at runtime */
		.end		= -1, /* filled at runtime */
		.flags		= IORESOURCE_MEM,
	},
};

static struct platform_device bcm63xx_enet_shared_device = {
	.name		= "bcm63xx_enet_shared",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(shared_res),
	.resource	= shared_res,
};

static int shared_device_registered;

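/* single 32-bit DMA mask shared by all Ethernet platform devices below */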
static u64 enet_dmamask = DMA_BIT_MASK(32);

static struct resource enet0_res[] = {
	{
		.start		= -1, /* filled at runtime */
		.end		= -1, /* filled at runtime */
		.flags		= IORESOURCE_MEM,
	},
	{
		.start		= -1, /* filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
	{
		.start		= -1, /* filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
	{
		.start		= -1, /* filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
};

static struct bcm63xx_enet_platform_data enet0_pd;

static struct platform_device bcm63xx_enet0_device = {
	.name		= "bcm63xx_enet",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(enet0_res),
	.resource	= enet0_res,
	.dev		= {
		.platform_data = &enet0_pd,
		.dma_mask = &enet_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

static struct resource enet1_res[] = {
	{
		.start		= -1, /* filled at runtime */
		.end		= -1, /* filled at runtime */
		.flags		= IORESOURCE_MEM,
	},
	{
		.start		= -1, /* filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
	{
		.start		= -1, /* filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
	{
		.start		= -1, /* filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
};

static struct bcm63xx_enet_platform_data enet1_pd;

static struct platform_device bcm63xx_enet1_device = {
	.name		= "bcm63xx_enet",
	.id		= 1,
	.num_resources	= ARRAY_SIZE(enet1_res),
	.resource	= enet1_res,
	.dev		= {
		.platform_data = &enet1_pd,
		.dma_mask = &enet_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

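/* integrated switch resources: register block plus rx/tx DMA interrupts */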
static struct resource enetsw_res[] = {
	{
		/* start & end filled at runtime */
		.flags		= IORESOURCE_MEM,
	},
	{
		/* start filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
	{
		/* start filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
};

static struct bcm63xx_enetsw_platform_data enetsw_pd;

static struct platform_device bcm63xx_enetsw_device = {
	.name		= "bcm63xx_enetsw",
	.num_resources	= ARRAY_SIZE(enetsw_res),
	.resource	= enetsw_res,
	.dev		= {
		.platform_data = &enetsw_pd,
		.dma_mask = &enet_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

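/*
 * Register the shared DMA engine device once; the register ranges depend
 * on the number of DMA channels the SoC provides.
 */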
static int __init register_shared(void)
{
	int ret, chan_count;

	if (shared_device_registered)
		return 0;

	bcm63xx_enetdmac_regs_init();

	shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
	shared_res[0].end = shared_res[0].start;
	if (BCMCPU_IS_6345())
		shared_res[0].end += (RSET_6345_ENETDMA_SIZE) - 1;
	else
		shared_res[0].end += (RSET_ENETDMA_SIZE) - 1;

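	/* number of DMA channels depends on the SoC family */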
	if (BCMCPU_IS_6328() || BCMCPU_IS_6362() || BCMCPU_IS_6368())
		chan_count = 32;
	else if (BCMCPU_IS_6345())
		chan_count = 8;
	else
		chan_count = 16;

	shared_res[1].start = bcm63xx_regset_address(RSET_ENETDMAC);
	shared_res[1].end = shared_res[1].start;
	shared_res[1].end += RSET_ENETDMAC_SIZE(chan_count) - 1;

	shared_res[2].start = bcm63xx_regset_address(RSET_ENETDMAS);
	shared_res[2].end = shared_res[2].start;
	shared_res[2].end += RSET_ENETDMAS_SIZE(chan_count) - 1;

	ret = platform_device_register(&bcm63xx_enet_shared_device);
	if (ret)
		return ret;
	shared_device_registered = 1;

	return 0;
}

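/*
 * Register Ethernet MAC "unit" (0 or 1) with a copy of the board-supplied
 * platform data.  The second MAC does not exist on BCM6338/BCM6345.
 */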
int __init bcm63xx_enet_register(int unit,
				 const struct bcm63xx_enet_platform_data *pd)
{
	struct platform_device *pdev;
	struct bcm63xx_enet_platform_data *dpd;
	int ret;

	if (unit > 1)
		return -ENODEV;

	if (unit == 1 && (BCMCPU_IS_6338() || BCMCPU_IS_6345()))
		return -ENODEV;

	ret = register_shared();
	if (ret)
		return ret;

	if (unit == 0) {
		enet0_res[0].start = bcm63xx_regset_address(RSET_ENET0);
		enet0_res[0].end = enet0_res[0].start;
		enet0_res[0].end += RSET_ENET_SIZE - 1;
		enet0_res[1].start = bcm63xx_get_irq_number(IRQ_ENET0);
		enet0_res[2].start = bcm63xx_get_irq_number(IRQ_ENET0_RXDMA);
		enet0_res[3].start = bcm63xx_get_irq_number(IRQ_ENET0_TXDMA);
		pdev = &bcm63xx_enet0_device;
	} else {
		enet1_res[0].start = bcm63xx_regset_address(RSET_ENET1);
		enet1_res[0].end = enet1_res[0].start;
		enet1_res[0].end += RSET_ENET_SIZE - 1;
		enet1_res[1].start = bcm63xx_get_irq_number(IRQ_ENET1);
		enet1_res[2].start = bcm63xx_get_irq_number(IRQ_ENET1_RXDMA);
		enet1_res[3].start = bcm63xx_get_irq_number(IRQ_ENET1_TXDMA);
		pdev = &bcm63xx_enet1_device;
	}

	/* copy given platform data */
	dpd = pdev->dev.platform_data;
	memcpy(dpd, pd, sizeof(*pd));

	/* adjust them in case internal phy is used */
	if (dpd->use_internal_phy) {
		/* internal phy only exists for enet0 */
		if (unit == 1)
			return -ENODEV;

		dpd->phy_id = 1;
		dpd->has_phy_interrupt = 1;
		dpd->phy_interrupt = bcm63xx_get_irq_number(IRQ_ENET_PHY);
	}

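	/*
	 * DMA channel configuration: the BCM6345 engine needs extra enable
	 * bits and interrupt causes, and uses a different channel stride
	 * and descriptor layout.
	 */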
	dpd->dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK;
	dpd->dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK;
	if (BCMCPU_IS_6345()) {
		dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_CHAINING_MASK;
		dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_WRAP_EN_MASK;
		dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_FLOWC_EN_MASK;
		dpd->dma_chan_int_mask |= ENETDMA_IR_BUFDONE_MASK;
		dpd->dma_chan_int_mask |= ENETDMA_IR_NOTOWNER_MASK;
		dpd->dma_chan_width = ENETDMA_6345_CHAN_WIDTH;
		dpd->dma_desc_shift = ENETDMA_6345_DESC_SHIFT;
	} else {
		dpd->dma_has_sram = true;
		dpd->dma_chan_width = ENETDMA_CHAN_WIDTH;
	}

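	/* DMA channels are fixed: 0/1 for ENET0, 2/3 for ENET1 */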
	if (unit == 0) {
		dpd->rx_chan = 0;
		dpd->tx_chan = 1;
	} else {
		dpd->rx_chan = 2;
		dpd->tx_chan = 3;
	}

	ret = platform_device_register(pdev);
	if (ret)
		return ret;
	return 0;
}

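/*
 * Register the integrated Ethernet switch, which is only present on
 * BCM6328/BCM6362/BCM6368.
 */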
int __init
bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd)
{
	int ret;

	if (!BCMCPU_IS_6328() && !BCMCPU_IS_6362() && !BCMCPU_IS_6368())
		return -ENODEV;

	ret = register_shared();
	if (ret)
		return ret;

	enetsw_res[0].start = bcm63xx_regset_address(RSET_ENETSW);
	enetsw_res[0].end = enetsw_res[0].start;
	enetsw_res[0].end += RSET_ENETSW_SIZE - 1;
	enetsw_res[1].start = bcm63xx_get_irq_number(IRQ_ENETSW_RXDMA0);
	enetsw_res[2].start = bcm63xx_get_irq_number(IRQ_ENETSW_TXDMA0);
	if (!enetsw_res[2].start)
		enetsw_res[2].start = -1;

	memcpy(bcm63xx_enetsw_device.dev.platform_data, pd, sizeof(*pd));

	if (BCMCPU_IS_6328())
		enetsw_pd.num_ports = ENETSW_PORTS_6328;
	else if (BCMCPU_IS_6362() || BCMCPU_IS_6368())
		enetsw_pd.num_ports = ENETSW_PORTS_6368;

	enetsw_pd.dma_has_sram = true;
	enetsw_pd.dma_chan_width = ENETDMA_CHAN_WIDTH;
	enetsw_pd.dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK;
	enetsw_pd.dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK;

	ret = platform_device_register(&bcm63xx_enetsw_device);
	if (ret)
		return ret;

	return 0;
}