// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2009
 * Marvell Semiconductor <www.marvell.com>
 * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
 *
 * (C) Copyright 2003
 * Ingo Assmus <ingo.assmus@keymile.com>
 *
 * based on - Driver for MV64360X ethernet ports
 * Copyright (C) 2002 rabeeh@galileo.co.il
 */

#include <common.h>
#include <dm.h>
#include <log.h>
#include <net.h>
#include <malloc.h>
#include <miiphy.h>
#include <wait_bit.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/arch/cpu.h>

#if defined(CONFIG_ARCH_KIRKWOOD)
#include <asm/arch/soc.h>
#elif defined(CONFIG_ARCH_ORION5X)
#include <asm/arch/orion5x.h>
#endif

#include "mvgbe.h"

DECLARE_GLOBAL_DATA_PTR;

#define MV_PHY_ADR_REQUEST 0xee
#define MVGBE_SMI_REG (((struct mvgbe_registers *)MVGBE0_BASE)->smi)
#define MVGBE_PGADR_REG	22
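
/*
 * Accesses to the pseudo PHY address MV_PHY_ADR_REQUEST (0xee) never go
 * out on the SMI bus: __mvgbe_mdio_read()/__mvgbe_mdio_write() below
 * intercept them and access the controller's own PHY address register
 * instead. MVGBE_PGADR_REG (22) is the page-select register of the
 * Marvell PHYs this driver is typically paired with; it is forced back
 * to page 0 before a PHY is connected.
 */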

#if defined(CONFIG_PHYLIB) || defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
static int smi_wait_ready(struct mvgbe_device *dmvgbe)
{
	int ret;

	ret = wait_for_bit_le32(&MVGBE_SMI_REG, MVGBE_PHY_SMI_BUSY_MASK, false,
				MVGBE_PHY_SMI_TIMEOUT_MS, false);
	if (ret) {
		printf("Error: SMI busy timeout\n");
		return ret;
	}

	return 0;
}

static int __mvgbe_mdio_read(struct mvgbe_device *dmvgbe, int phy_adr,
			     int devad, int reg_ofs)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;
	u16 data = 0;

	/* Phyadr read request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		/* Return the port's own PHY address, not a PHY register */
		data = (u16) (MVGBE_REG_RD(regs->phyadr) & PHYADR_MASK);
		return data;
	}
	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid PHY address %d\n",
			__func__, phy_adr);
		return -EINVAL;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset %d\n",
			__func__, reg_ofs);
		return -EINVAL;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(dmvgbe) < 0)
		return -EFAULT;

	/* fill the phy address and register offset and read opcode */
	smi_reg = (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS)
		| MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	/* wait till the read value is ready */
	timeout = MVGBE_PHY_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI read ready timeout\n",
				__func__);
			return -EFAULT;
		}
	} while (!(smi_reg & MVGBE_PHY_SMI_READ_VALID_MASK));

	/* Burn some cycles to let the read data settle in the SMI register */
	for (timeout = 0; timeout < MVGBE_PHY_SMI_TIMEOUT; timeout++)
		;

	data = (u16) (MVGBE_REG_RD(MVGBE_SMI_REG) & MVGBE_PHY_SMI_DATA_MASK);

	debug("%s:(adr %d, off %d) value= %04x\n", __func__, phy_adr, reg_ofs,
	      data);

	return data;
}

/*
 * smi_reg_read - miiphy_read callback function.
 *
 * Returns the 16-bit PHY register value, or a negative errno on error
 */
static int smi_reg_read(struct mii_dev *bus, int phy_adr, int devad,
			int reg_ofs)
{
	struct mvgbe_device *dmvgbe = bus->priv;

	return __mvgbe_mdio_read(dmvgbe, phy_adr, devad, reg_ofs);
}

static int __mvgbe_mdio_write(struct mvgbe_device *dmvgbe, int phy_adr,
			      int devad, int reg_ofs, u16 data)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;

	/* Phyadr write request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		MVGBE_REG_WR(regs->phyadr, data);
		return 0;
	}

	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid PHY address\n", __func__);
		return -EINVAL;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset\n", __func__);
		return -EINVAL;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(dmvgbe) < 0)
		return -EFAULT;

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = (data << MVGBE_PHY_SMI_DATA_OFFS);
	smi_reg |= (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS);
	/* a cleared opcode field selects the SMI write operation */
	smi_reg &= ~MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	return 0;
}

/*
 * smi_reg_write - miiphy_write callback function.
 *
 * Returns 0 if the write succeeds, a negative errno on error
 */
static int smi_reg_write(struct mii_dev *bus, int phy_adr, int devad,
			 int reg_ofs, u16 data)
{
	struct mvgbe_device *dmvgbe = bus->priv;

	return __mvgbe_mdio_write(dmvgbe, phy_adr, devad, reg_ofs, data);
}
#endif

/* Stop all active queues behind @qreg and wait until they are idle */
static void stop_queue(u32 *qreg)
{
	u32 reg_data;

	reg_data = readl(qreg);

	if (reg_data & 0xFF) {
		/* Issue stop command for active channels only */
		writel((reg_data << 8), qreg);

		/* Wait for all queue activity to terminate. */
		do {
			/*
			 * Check port cause register that all queues
			 * are stopped
			 */
			reg_data = readl(qreg);
		} while (reg_data & 0xFF);
	}
}

/*
 * set_access_control - Config address decode parameters for Ethernet unit
 *
 * This function configures the address decode parameters for the Gigabit
 * Ethernet Controller according to the given parameters struct.
 *
 * @regs	Register struct pointer.
 * @param	Address decode parameter struct.
 */
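/*
 * Worked example of the size encoding below: a 256 MiB window is
 * programmed as ((0x10000000 / 0x10000) - 1) << 16 = 0x0fff0000, i.e.
 * bits 31:16 hold the window size in 64 KiB units, minus one.
 */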
static void set_access_control(struct mvgbe_registers *regs,
				struct mvgbe_winparam *param)
{
	u32 access_prot_reg;

	/* Set access control register */
	access_prot_reg = MVGBE_REG_RD(regs->epap);
	/* clear window permission */
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MVGBE_REG_WR(regs->epap, access_prot_reg);

	/* Set window Size reg (SR) */
	MVGBE_REG_WR(regs->barsz[param->win].size,
			(((param->size / 0x10000) - 1) << 16));

	/* Set window Base address reg (BA) */
	MVGBE_REG_WR(regs->barsz[param->win].bar,
			(param->target | param->attrib | param->base_addr));
	/* High address remap reg (HARR) */
	if (param->win < 4)
		MVGBE_REG_WR(regs->ha_remap[param->win], param->high_addr);

	/*
	 * Base address enable reg (BARER): its bits are active-low, so
	 * clearing a window's bit enables that window.
	 */
	if (param->enable == 1)
		MVGBE_REG_BITS_RESET(regs->bare, (1 << param->win));
	else
		MVGBE_REG_BITS_SET(regs->bare, (1 << param->win));
}

static void set_dram_access(struct mvgbe_registers *regs)
{
	struct mvgbe_winparam win_param;
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		/* Set access parameters for DRAM bank i */
		win_param.win = i;	/* Use Ethernet window i */
		/* Window target - DDR */
		win_param.target = MVGBE_TARGET_DRAM;
		/* Enable full access */
		win_param.access_ctrl = EWIN_ACCESS_FULL;
		win_param.high_addr = 0;
		/* Get bank base and size */
		win_param.base_addr = gd->bd->bi_dram[i].start;
		win_param.size = gd->bd->bi_dram[i].size;
		if (win_param.size == 0)
			win_param.enable = 0;
		else
			win_param.enable = 1;	/* Enable the access */

		/* Enable DRAM bank */
		switch (i) {
		case 0:
			win_param.attrib = EBAR_DRAM_CS0;
			break;
		case 1:
			win_param.attrib = EBAR_DRAM_CS1;
			break;
		case 2:
			win_param.attrib = EBAR_DRAM_CS2;
			break;
		case 3:
			win_param.attrib = EBAR_DRAM_CS3;
			break;
		default:
			/* invalid bank, disable access */
			win_param.enable = 0;
			win_param.attrib = 0;
			break;
		}
		/* Set the access control for address window(EPAPR) RD/WR */
		set_access_control(regs, &win_param);
	}
}

/*
 * port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
 *
 * Go through all the DA filter tables (Unicast, Special Multicast & Other
 * Multicast) and set each entry to 0.
 */
static void port_init_mac_tables(struct mvgbe_registers *regs)
{
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index < 4; ++table_index)
		MVGBE_REG_WR(regs->dfut[table_index], 0);

	for (table_index = 0; table_index < 64; ++table_index) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		MVGBE_REG_WR(regs->dfsmt[table_index], 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		MVGBE_REG_WR(regs->dfomt[table_index], 0);
	}
}

/*
 * port_uc_addr - Set the port unicast address table
 *
 * This function locates the proper entry in the unicast table for the
 * specified MAC nibble and sets its properties according to the function
 * parameters. It adds/removes MAC addresses to/from the port unicast
 * address table.
 *
 * @uc_nibble	Unicast MAC address last nibble.
 * @option	ACCEPT_MAC_ADDR to add the address, REJECT_MAC_ADDR to
 *		remove it.
 *
 * RETURN: 1 on success, 0 if the option parameter is invalid.
 */
static int port_uc_addr(struct mvgbe_registers *regs, u8 uc_nibble,
			int option)
{
	u32 unicast_reg;
	u32 tbl_offset;
	u32 reg_offset;

	/* Locate the Unicast table entry */
	uc_nibble = (0xf & uc_nibble);
	/* Register offset from unicast table base */
	tbl_offset = (uc_nibble / 4);
	/* Entry offset within the above register */
	reg_offset = uc_nibble % 4;
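
	/*
	 * Example: for a MAC address ending in nibble 5, uc_nibble = 5,
	 * so the entry lives in byte 1 of dfut[1], and (assuming RXUQ is
	 * queue 0) the "accept" value written into that byte is 0x01.
	 */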

	switch (option) {
	case REJECT_MAC_ADDR:
		/*
		 * Clear accepts frame bit at specified unicast
		 * DA table entry
		 */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		/* clear only the addressed entry's byte */
		unicast_reg &= ~(0xFF << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	case ACCEPT_MAC_ADDR:
		/* Set accepts frame bit at unicast DA filter table entry */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= ~(0xFF << (8 * reg_offset));
		unicast_reg |= ((0x01 | (RXUQ << 1)) << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	default:
		return 0;
	}
	return 1;
}

/*
 * port_uc_addr_set - Set the port unicast address
 */
static void port_uc_addr_set(struct mvgbe_device *dmvgbe, u8 *p_addr)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 mac_h;
	u32 mac_l;

	mac_l = (p_addr[4] << 8) | (p_addr[5]);
	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
		(p_addr[3] << 0);
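
	/*
	 * e.g. for the MAC address 00:50:43:12:34:56 this yields
	 * mac_h = 0x00504312 and mac_l = 0x00003456.
	 */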

	MVGBE_REG_WR(regs->macal, mac_l);
	MVGBE_REG_WR(regs->macah, mac_h);

	/* Accept frames of this address */
	port_uc_addr(regs, p_addr[5], ACCEPT_MAC_ADDR);
}

/*
 * mvgbe_init_rx_desc_ring - Carve out an Rx descriptor ring and its
 * buffers in memory.
 */
static void mvgbe_init_rx_desc_ring(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_rxdesc *p_rx_desc;
	int i;

	/* initialize the Rx descriptors ring */
	p_rx_desc = dmvgbe->p_rxdesc;
	for (i = 0; i < RINGSZ; i++) {
		p_rx_desc->cmd_sts =
			MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
		p_rx_desc->buf_size = PKTSIZE_ALIGN;
		p_rx_desc->byte_cnt = 0;
		p_rx_desc->buf_ptr = dmvgbe->p_rxbuf + i * PKTSIZE_ALIGN;
		if (i == (RINGSZ - 1)) {
			/* close the ring: last desc points back to first */
			p_rx_desc->nxtdesc_p = dmvgbe->p_rxdesc;
		} else {
			p_rx_desc->nxtdesc_p = (struct mvgbe_rxdesc *)
				((u32) p_rx_desc + MV_RXQ_DESC_ALIGNED_SIZE);
			p_rx_desc = p_rx_desc->nxtdesc_p;
		}
	}
	dmvgbe->p_rxdesc_curr = dmvgbe->p_rxdesc;
}

static int __mvgbe_init(struct mvgbe_device *dmvgbe, u8 *enetaddr,
			const char *name)
{
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* setup RX rings */
	mvgbe_init_rx_desc_ring(dmvgbe);

	/* Clear the ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	/* Unmask RX buffer and TX end interrupt */
	MVGBE_REG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
	/* Unmask phy and link status changes interrupts */
	MVGBE_REG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);

	set_dram_access(regs);
	port_init_mac_tables(regs);
	port_uc_addr_set(dmvgbe, enetaddr);

	/* Assign port configuration and command. */
	MVGBE_REG_WR(regs->pxc, PRT_CFG_VAL);
	MVGBE_REG_WR(regs->pxcx, PORT_CFG_EXTEND_VALUE);
	MVGBE_REG_WR(regs->psc0, PORT_SERIAL_CONTROL_VALUE);

	/* Assign port SDMA configuration */
	MVGBE_REG_WR(regs->sdc, PORT_SDMA_CFG_VALUE);
	MVGBE_REG_WR(regs->tqx[0].qxttbc, QTKNBKT_DEF_VAL);
	MVGBE_REG_WR(regs->tqx[0].tqxtbc,
		(QMTBS_DEF_VAL << 16) | QTKNRT_DEF_VAL);
	/* Turn off the port/RXUQ bandwidth limitation */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Set maximum receive buffer to 9700 bytes */
	MVGBE_REG_WR(regs->psc0, MVGBE_MAX_RX_PACKET_9700BYTE
			| (MVGBE_REG_RD(regs->psc0) & MRU_MASK));

	/* Enable port initially */
	MVGBE_REG_BITS_SET(regs->psc0, MVGBE_SERIAL_PORT_EN);

	/*
	 * Set ethernet MTU for leaky bucket mechanism to 0 - this
	 * disables the leaky bucket mechanism.
	 */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Assign the Rx current descriptor pointer of the given RXUQ */
	MVGBE_REG_WR(regs->rxcdp[RXUQ], (u32) dmvgbe->p_rxdesc_curr);
	/* ensure previous write is done before enabling Rx DMA */
	isb();
	/* Enable port Rx. */
	MVGBE_REG_WR(regs->rqc, (1 << RXUQ));

	return 0;
}

static void __mvgbe_halt(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Disable all gigE address decoder windows */
	MVGBE_REG_WR(regs->bare, 0x3f);

	stop_queue(&regs->tqc);
	stop_queue(&regs->rqc);

	/* Disable port */
	MVGBE_REG_BITS_RESET(regs->psc0, MVGBE_SERIAL_PORT_EN);
	/* Ensure the port is not held in reset */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 4);
#ifdef CONFIG_SYS_MII_MODE
	/* Set MII interface up */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 3);
#endif
	/* Disable & mask ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	MVGBE_REG_WR(regs->pim, 0);
	MVGBE_REG_WR(regs->peim, 0);
}

static int mvgbe_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);

	port_uc_addr_set(dev_get_priv(dev), pdata->enetaddr);

	return 0;
}

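/*
 * Descriptor ownership handshake: the driver hands a descriptor to the
 * controller by setting MVGBE_BUFFER_OWNED_BY_DMA in cmd_sts, and the
 * hardware clears the bit once it has transmitted (Tx) or filled (Rx)
 * the buffer. __mvgbe_send() and __mvgbe_recv() below poll that bit.
 */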
static int __mvgbe_send(struct mvgbe_device *dmvgbe, void *dataptr,
			int datasize)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	struct mvgbe_txdesc *p_txdesc = dmvgbe->p_txdesc;
	void *p = (void *)dataptr;
	u32 cmd_sts;
	u32 txuq0_reg_addr;

	/* Copy the buffer if it is not 8-byte aligned; the Tx DMA needs it */
	if ((u32) dataptr & 0x07) {
		if (datasize > PKTSIZE_ALIGN) {
			printf("Non-aligned data too large (%d)\n",
					datasize);
			return -1;
		}

		memcpy(dmvgbe->p_aligned_txbuf, p, datasize);
		p = dmvgbe->p_aligned_txbuf;
	}
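
	/*
	 * The driver uses a single Tx descriptor, rebuilt for every frame;
	 * there is no Tx ring to manage.
	 */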

	p_txdesc->cmd_sts = MVGBE_ZERO_PADDING | MVGBE_GEN_CRC;
	p_txdesc->cmd_sts |= MVGBE_TX_FIRST_DESC | MVGBE_TX_LAST_DESC;
	p_txdesc->cmd_sts |= MVGBE_BUFFER_OWNED_BY_DMA;
	p_txdesc->cmd_sts |= MVGBE_TX_EN_INTERRUPT;
	p_txdesc->buf_ptr = (u8 *) p;
	p_txdesc->byte_cnt = datasize;

	/* Set this Tx desc as the head of the zeroth TXUQ */
	txuq0_reg_addr = (u32)&regs->tcqdp[TXUQ];
	writel((u32) p_txdesc, txuq0_reg_addr);

	/* ensure tx desc writes above are performed before we start Tx DMA */
	isb();

	/* Apply send command using zeroth TXUQ */
	MVGBE_REG_WR(regs->tqc, (1 << TXUQ));

	/* wait for packet transmit completion */
	cmd_sts = readl(&p_txdesc->cmd_sts);
	while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
		/* return fail if an error is detected */
		if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
				(MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
				cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
			printf("Err..(%s) in xmit packet\n", __func__);
			return -1;
		}
		cmd_sts = readl(&p_txdesc->cmd_sts);
	}
	return 0;
}

static int __mvgbe_recv(struct mvgbe_device *dmvgbe, uchar **packetp)
{
	struct mvgbe_rxdesc *p_rxdesc_curr = dmvgbe->p_rxdesc_curr;
	u32 cmd_sts;
	u32 timeout = 0;
	u32 rxdesc_curr_addr;
	unsigned char *data;
	int rx_bytes = 0;

	*packetp = NULL;

	/* wait until an Rx packet is available or the timeout expires */
	do {
		if (timeout < MVGBE_PHY_SMI_TIMEOUT) {
			timeout++;
		} else {
			debug("%s time out...\n", __func__);
			return -1;
		}
	} while (readl(&p_rxdesc_curr->cmd_sts) & MVGBE_BUFFER_OWNED_BY_DMA);

	if (p_rxdesc_curr->byte_cnt != 0) {
		debug("%s: Received %d byte packet @ 0x%x (cmd_sts= %08x)\n",
			__func__, (u32) p_rxdesc_curr->byte_cnt,
			(u32) p_rxdesc_curr->buf_ptr,
			(u32) p_rxdesc_curr->cmd_sts);
	}

	/*
	 * A packet received without both the first and last descriptor
	 * bits set, or with the error summary bit set, must be dropped.
	 */
	cmd_sts = readl(&p_rxdesc_curr->cmd_sts);

	if ((cmd_sts &
		(MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC))
		!= (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC)) {
		printf("Err..(%s) Dropping packet spread on multiple descriptors\n",
			__func__);
	} else if (cmd_sts & MVGBE_ERROR_SUMMARY) {
		printf("Err..(%s) Dropping packet with errors\n",
			__func__);
	} else {
		/* Hand the received packet up to the higher layer */
		debug("%s: Sending received packet to upper layer (net_process_received_packet)\n",
		      __func__);

		data = (p_rxdesc_curr->buf_ptr + RX_BUF_OFFSET);
		rx_bytes = (int)(p_rxdesc_curr->byte_cnt -
						  RX_BUF_OFFSET);

		*packetp = data;
	}
	/*
	 * Return the descriptor to DMA ownership and advance to the next
	 * descriptor in the ring
	 */
	p_rxdesc_curr->cmd_sts =
		MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
	p_rxdesc_curr->buf_size = PKTSIZE_ALIGN;
	p_rxdesc_curr->byte_cnt = 0;

	rxdesc_curr_addr = (u32)&dmvgbe->p_rxdesc_curr;
	writel((unsigned)p_rxdesc_curr->nxtdesc_p, rxdesc_curr_addr);

	return rx_bytes;
}

#if defined(CONFIG_PHYLIB)
static struct phy_device *__mvgbe_phy_init(struct udevice *dev,
					   struct mii_dev *bus,
					   phy_interface_t phy_interface,
					   int phyid)
{
	struct phy_device *phydev;

	/* Set phy address of the port */
	miiphy_write(dev->name, MV_PHY_ADR_REQUEST, MV_PHY_ADR_REQUEST,
		     phyid);

	/* Make sure the selected PHY page is 0 before connecting */
	miiphy_write(dev->name, phyid, MVGBE_PGADR_REG, 0);

	phydev = phy_connect(bus, phyid, dev, phy_interface);
	if (!phydev) {
		printf("phy_connect failed\n");
		return NULL;
	}

	phy_config(phydev);
	phy_startup(phydev);

	return phydev;
}
#endif /* CONFIG_PHYLIB */

static int mvgbe_alloc_buffers(struct mvgbe_device *dmvgbe)
{
	dmvgbe->p_rxdesc = memalign(PKTALIGN,
				    MV_RXQ_DESC_ALIGNED_SIZE * RINGSZ + 1);
	if (!dmvgbe->p_rxdesc)
		goto error1;

	dmvgbe->p_rxbuf = memalign(PKTALIGN,
				   RINGSZ * PKTSIZE_ALIGN + 1);
	if (!dmvgbe->p_rxbuf)
		goto error2;

	dmvgbe->p_aligned_txbuf = memalign(8, PKTSIZE_ALIGN);
	if (!dmvgbe->p_aligned_txbuf)
		goto error3;

	dmvgbe->p_txdesc = memalign(PKTALIGN, sizeof(struct mvgbe_txdesc) + 1);
	if (!dmvgbe->p_txdesc)
		goto error4;

	return 0;

error4:
	free(dmvgbe->p_aligned_txbuf);
error3:
	free(dmvgbe->p_rxbuf);
error2:
	free(dmvgbe->p_rxdesc);
error1:
	return -ENOMEM;
}

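/*
 * A phyaddr above PHY_MAX_ADDR is the sentinel that mvgbe_of_to_plat()
 * stores for 'fixed-link' ports, i.e. ports without an MDIO-managed PHY.
 */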
static int mvgbe_port_is_fixed_link(struct mvgbe_device *dmvgbe)
{
	return dmvgbe->phyaddr > PHY_MAX_ADDR;
}

static int mvgbe_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);
	int ret;

	ret = __mvgbe_init(dmvgbe, pdata->enetaddr, dev->name);
	if (ret)
		return ret;

	if (!mvgbe_port_is_fixed_link(dmvgbe)) {
		dmvgbe->phydev = __mvgbe_phy_init(dev, dmvgbe->bus,
						  dmvgbe->phy_interface,
						  dmvgbe->phyaddr);
		if (!dmvgbe->phydev)
			return -ENODEV;
	}

	return 0;
}

static int mvgbe_send(struct udevice *dev, void *packet, int length)
{
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);

	return __mvgbe_send(dmvgbe, packet, length);
}

static int mvgbe_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);

	return __mvgbe_recv(dmvgbe, packetp);
}

static void mvgbe_stop(struct udevice *dev)
{
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);

	__mvgbe_halt(dmvgbe);
}

static int mvgbe_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);
	struct mii_dev *bus;
	int ret;

	ret = mvgbe_alloc_buffers(dmvgbe);
	if (ret)
		return ret;

	dmvgbe->regs = (void __iomem *)pdata->iobase;

	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = smi_reg_read;
	bus->write = smi_reg_write;
	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
	bus->priv = dmvgbe;
	dmvgbe->bus = bus;

	ret = mdio_register(bus);
	if (ret < 0) {
		mdio_free(bus);
		return ret;
	}

	return 0;
}

static const struct eth_ops mvgbe_ops = {
	.start		= mvgbe_start,
	.send		= mvgbe_send,
	.recv		= mvgbe_recv,
	.stop		= mvgbe_stop,
	.write_hwaddr	= mvgbe_write_hwaddr,
};

static int mvgbe_of_to_plat(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);
	void *blob = (void *)gd->fdt_blob;
	int node = dev_of_offset(dev);
	int fl_node;
	int pnode;
	int addr;

	pdata->iobase = dev_read_addr(dev);

	pnode = fdt_node_offset_by_compatible(blob, node,
					      "marvell,kirkwood-eth-port");

	/* Get phy-mode / phy_interface from DT */
	pdata->phy_interface = dev_read_phy_mode(dev);
	if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
		pdata->phy_interface = PHY_INTERFACE_MODE_GMII;

	dmvgbe->phy_interface = pdata->phy_interface;

	/* fetch 'fixed-link' property */
	fl_node = fdt_subnode_offset(blob, pnode, "fixed-link");
	if (fl_node >= 0) {
		/* set phy_addr to invalid value for fixed link */
		dmvgbe->phyaddr = PHY_MAX_ADDR + 1;
		dmvgbe->duplex = fdtdec_get_bool(blob, fl_node, "full-duplex");
		dmvgbe->speed = fdtdec_get_int(blob, fl_node, "speed", 0);
	} else {
		/* Read the PHY address from the phy-handle node */
		addr = fdtdec_lookup_phandle(blob, pnode, "phy-handle");
		if (addr > 0)
			dmvgbe->phyaddr = fdtdec_get_int(blob, addr, "reg", 0);
	}

	return 0;
}

static const struct udevice_id mvgbe_ids[] = {
	{ .compatible = "marvell,kirkwood-eth" },
	{ }
};

U_BOOT_DRIVER(mvgbe) = {
	.name	= "mvgbe",
	.id	= UCLASS_ETH,
	.of_match = mvgbe_ids,
	.of_to_plat = mvgbe_of_to_plat,
	.probe	= mvgbe_probe,
	.ops	= &mvgbe_ops,
	.priv_auto	= sizeof(struct mvgbe_device),
	.plat_auto	= sizeof(struct eth_pdata),
};