1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (C) 2005-2006 Atmel Corporation
4 */
5#include <clk.h>
6#include <cpu_func.h>
7#include <dm.h>
8#include <log.h>
9#include <asm/global_data.h>
10#include <linux/delay.h>
11
12/*
13 * The u-boot networking stack is a little weird.  It seems like the
14 * networking core allocates receive buffers up front without any
15 * regard to the hardware that's supposed to actually receive those
16 * packets.
17 *
18 * The MACB receives packets into 128-byte receive buffers, so the
19 * buffers allocated by the core isn't very practical to use.  We'll
20 * allocate our own, but we need one such buffer in case a packet
21 * wraps around the DMA ring so that we have to copy it.
22 *
23 * Therefore, define CONFIG_SYS_RX_ETH_BUFFER to 1 in the board-specific
24 * configuration header.  This way, the core allocates one RX buffer
25 * and one TX buffer, each of which can hold a ethernet packet of
26 * maximum size.
27 *
28 * For some reason, the networking core unconditionally specifies a
29 * 32-byte packet "alignment" (which really should be called
30 * "padding").  MACB shouldn't need that, but we'll refrain from any
31 * core modifications here...
32 */
33
34#include <net.h>
35#include <malloc.h>
36#include <miiphy.h>
37
38#include <linux/mii.h>
39#include <asm/io.h>
40#include <linux/dma-mapping.h>
41#include <asm/arch/clk.h>
42#include <linux/errno.h>
43
44#include "macb.h"
45
46DECLARE_GLOBAL_DATA_PTR;
47
/*
 * These buffer sizes must be power of 2 and divisible
 * by RX_BUFFER_MULTIPLE
 */
#define MACB_RX_BUFFER_SIZE		128
#define GEM_RX_BUFFER_SIZE		2048
#define RX_BUFFER_MULTIPLE		64

/* Number of descriptors in the RX and TX DMA rings */
#define MACB_RX_RING_SIZE		32
#define MACB_TX_RING_SIZE		16

/* TX-complete poll limit (in 1us steps); autonegotiation poll (in 100us steps) */
#define MACB_TX_TIMEOUT		1000
#define MACB_AUTONEG_TIMEOUT	5000000

#ifdef CONFIG_MACB_ZYNQ
/* INCR4 AHB bursts */
#define MACB_ZYNQ_GEM_DMACR_BLENGTH		0x00000004
/* Use full configured addressable space (8 Kb) */
#define MACB_ZYNQ_GEM_DMACR_RXSIZE		0x00000300
/* Use full configured addressable space (4 Kb) */
#define MACB_ZYNQ_GEM_DMACR_TXSIZE		0x00000400
/* Set RXBUF with use of 128 byte */
#define MACB_ZYNQ_GEM_DMACR_RXBUF		0x00020000
#define MACB_ZYNQ_GEM_DMACR_INIT \
				(MACB_ZYNQ_GEM_DMACR_BLENGTH | \
				MACB_ZYNQ_GEM_DMACR_RXSIZE | \
				MACB_ZYNQ_GEM_DMACR_TXSIZE | \
				MACB_ZYNQ_GEM_DMACR_RXBUF)
#endif

/* Hardware DMA descriptor, 32-bit address variant: buffer address + control */
struct macb_dma_desc {
	u32	addr;
	u32	ctrl;
};

/*
 * Upper 32 address bits; laid out directly after struct macb_dma_desc
 * when the controller runs with 64-bit DMA addressing.
 */
struct macb_dma_desc_64 {
	u32 addrh;
	u32 unused;
};

/* Values for macb_config.hw_dma_cap */
#define HW_DMA_CAP_32B		0
#define HW_DMA_CAP_64B		1

/* Descriptor stride used for ring sizing (covers the 64-bit extension) */
#define DMA_DESC_SIZE		16
#define DMA_DESC_BYTES(n)	((n) * DMA_DESC_SIZE)
#define MACB_TX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_TX_RING_SIZE))
#define MACB_RX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_RX_RING_SIZE))
#define MACB_TX_DUMMY_DMA_DESC_SIZE	(DMA_DESC_BYTES(1))

/* How many descriptors share one CPU cacheline (see reclaim_rx_buffer()) */
#define DESC_PER_CACHELINE_32	(ARCH_DMA_MINALIGN/sizeof(struct macb_dma_desc))
#define DESC_PER_CACHELINE_64	(ARCH_DMA_MINALIGN/DMA_DESC_SIZE)

/* Frame-length fields in the RX/TX descriptor control words */
#define RXBUF_FRMLEN_MASK	0x00000fff
#define TXBUF_FRMLEN_MASK	0x000007ff
102
/* Per-device driver state */
struct macb_device {
	void			*regs;		/* base of memory-mapped registers */

	bool			is_big_endian;	/* CPU endianness, used for DMA config */

	const struct macb_config *config;	/* per-compatible configuration */

	/* ring bookkeeping (indices into the RX/TX descriptor rings) */
	unsigned int		rx_tail;
	unsigned int		tx_head;
	unsigned int		tx_tail;
	unsigned int		next_rx_tail;
	bool			wrapped;	/* current RX frame wraps past end of ring */

	/* DMA buffers/rings: CPU virtual addresses ... */
	void			*rx_buffer;
	void			*tx_buffer;
	struct macb_dma_desc	*rx_ring;
	struct macb_dma_desc	*tx_ring;
	size_t			rx_buffer_size;	/* bytes per RX buffer slot */

	/* ... and the matching bus (DMA) addresses */
	unsigned long		rx_buffer_dma;
	unsigned long		rx_ring_dma;
	unsigned long		tx_ring_dma;

	/* one TX_USED descriptor parked on unused GEM queues */
	struct macb_dma_desc	*dummy_desc;
	unsigned long		dummy_desc_dma;

	const struct device	*dev;
	unsigned int    	duplex;		/* fixed-link duplex setting */
	unsigned int    	speed;		/* fixed-link speed setting */
	unsigned short		phy_addr;
	struct mii_dev		*bus;
#ifdef CONFIG_PHYLIB
	struct phy_device	*phydev;
#endif

#ifdef CONFIG_CLK
	unsigned long		pclk_rate;	/* peripheral clock, for MDC divider */
#endif
	phy_interface_t		phy_interface;
};
143
/* Bit patterns to program into the USRIO register per PHY interface mode */
struct macb_usrio_cfg {
	unsigned int		mii;
	unsigned int		rmii;
	unsigned int		rgmii;
	unsigned int		clken;
};

/* Static, per-compatible-string controller configuration */
struct macb_config {
	unsigned int		dma_burst_length;	/* 0 = leave FBLDO untouched */
	unsigned int		hw_dma_cap;		/* HW_DMA_CAP_32B or HW_DMA_CAP_64B */
	unsigned int		caps;			/* MACB_CAPS_* flags */

	/* optional TX-clock setup hook invoked on link-speed changes */
	int			(*clk_init)(struct udevice *dev, ulong rate);
	const struct macb_usrio_cfg	*usrio;
};
159
160static int macb_is_gem(struct macb_device *macb)
161{
162	return MACB_BFEXT(IDNUM, macb_readl(macb, MID)) >= 0x2;
163}
164
/* Fallbacks for platforms that do not provide these SoC identity checks. */
#ifndef cpu_is_sama5d2
#define cpu_is_sama5d2() 0
#endif

#ifndef cpu_is_sama5d4
#define cpu_is_sama5d4() 0
#endif
172
static int gem_is_gigabit_capable(struct macb_device *macb)
{
	/*
	 * The GEM controllers embedded in SAMA5D2 and SAMA5D4 are
	 * configured to support only 10/100, so they never qualify.
	 */
	if (cpu_is_sama5d2() || cpu_is_sama5d4())
		return 0;

	return macb_is_gem(macb);
}
181
182/* Is the port a fixed link */
183static int macb_port_is_fixed_link(struct macb_device *macb)
184{
185	return macb->phy_addr > PHY_MAX_ADDR;
186}
187
/*
 * Write @value to PHY register @reg of PHY @phy_adr via the MDIO
 * management interface.  Busy-waits (no timeout) until the controller
 * reports the management frame complete.
 */
static void macb_mdio_write(struct macb_device *macb, u8 phy_adr, u8 reg,
			    u16 value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	/* Enable the management port for the duration of the transfer */
	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	/* Clause 22 write frame: SOF=1, RW=1 (write), CODE=2 */
	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 1)
		 | MACB_BF(PHYA, phy_adr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2)
		 | MACB_BF(DATA, value));
	macb_writel(macb, MAN, frame);

	/* Wait for the management interface to go idle again */
	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	/* Disable the management port again */
	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);
}
215
/*
 * Read PHY register @reg of PHY @phy_adr via the MDIO management
 * interface and return its value.  Busy-waits (no timeout) for
 * completion, mirroring macb_mdio_write().
 */
static u16 macb_mdio_read(struct macb_device *macb, u8 phy_adr, u8 reg)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	/* Enable the management port for the duration of the transfer */
	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	/* Clause 22 read frame: SOF=1, RW=2 (read), CODE=2 */
	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 2)
		 | MACB_BF(PHYA, phy_adr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2));
	macb_writel(macb, MAN, frame);

	/* Wait for the management interface to go idle again */
	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	/* The data field of MAN now holds the value read from the PHY */
	frame = macb_readl(macb, MAN);

	/* Disable the management port again */
	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	return MACB_BFEXT(DATA, frame);
}
245
246void __weak arch_get_mdio_control(const char *name)
247{
248	return;
249}
250
251#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
252
253int macb_miiphy_read(struct mii_dev *bus, int phy_adr, int devad, int reg)
254{
255	u16 value = 0;
256	struct udevice *dev = eth_get_dev_by_name(bus->name);
257	struct macb_device *macb = dev_get_priv(dev);
258
259	arch_get_mdio_control(bus->name);
260	value = macb_mdio_read(macb, phy_adr, reg);
261
262	return value;
263}
264
265int macb_miiphy_write(struct mii_dev *bus, int phy_adr, int devad, int reg,
266		      u16 value)
267{
268	struct udevice *dev = eth_get_dev_by_name(bus->name);
269	struct macb_device *macb = dev_get_priv(dev);
270
271	arch_get_mdio_control(bus->name);
272	macb_mdio_write(macb, phy_adr, reg, value);
273
274	return 0;
275}
276#endif
277
/* Direction selectors for the ring cache-maintenance helpers below */
#define RX	1
#define TX	0
/* Invalidate the CPU cache over a descriptor ring before reading HW updates */
static inline void macb_invalidate_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		invalidate_dcache_range(macb->rx_ring_dma,
			ALIGN(macb->rx_ring_dma + MACB_RX_DMA_DESC_SIZE,
			      PKTALIGN));
	else
		invalidate_dcache_range(macb->tx_ring_dma,
			ALIGN(macb->tx_ring_dma + MACB_TX_DMA_DESC_SIZE,
			      PKTALIGN));
}
291
/* Flush CPU writes to a descriptor ring out to memory for the DMA engine */
static inline void macb_flush_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		flush_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
				   ALIGN(MACB_RX_DMA_DESC_SIZE, PKTALIGN));
	else
		flush_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
				   ALIGN(MACB_TX_DMA_DESC_SIZE, PKTALIGN));
}
301
/* Flush the whole RX buffer area (all ring slots) out of the CPU cache */
static inline void macb_flush_rx_buffer(struct macb_device *macb)
{
	flush_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
			   ALIGN(macb->rx_buffer_size * MACB_RX_RING_SIZE,
				 PKTALIGN));
}
308
/* Invalidate the whole RX buffer area before the CPU reads received data */
static inline void macb_invalidate_rx_buffer(struct macb_device *macb)
{
	invalidate_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
				ALIGN(macb->rx_buffer_size * MACB_RX_RING_SIZE,
				      PKTALIGN));
}
315
316#if defined(CONFIG_CMD_NET)
317
318static struct macb_dma_desc_64 *macb_64b_desc(struct macb_dma_desc *desc)
319{
320	return (struct macb_dma_desc_64 *)((void *)desc
321		+ sizeof(struct macb_dma_desc));
322}
323
/*
 * Store a buffer bus address into a descriptor, writing the upper
 * 32 bits into the 64-bit extension when the hardware supports it.
 * The low word is written last; it carries the RX_USED/ownership bit.
 */
static void macb_set_addr(struct macb_device *macb, struct macb_dma_desc *desc,
			  ulong addr)
{
	struct macb_dma_desc_64 *desc_64;

	if (macb->config->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(desc);
		desc_64->addrh = upper_32_bits(addr);
	}
	desc->addr = lower_32_bits(addr);
}
335
/*
 * Transmit one frame: queue it on the next TX descriptor, kick TSTART
 * and busy-wait (up to MACB_TX_TIMEOUT us) for the controller to hand
 * the descriptor back.  Always returns 0; errors are only printed.
 */
static int _macb_send(struct macb_device *macb, const char *name, void *packet,
		      int length)
{
	unsigned long paddr, ctrl;
	unsigned int tx_head = macb->tx_head;
	int i;

	paddr = dma_map_single(packet, length, DMA_TO_DEVICE);

	/* One descriptor per frame: length + last-buffer flag */
	ctrl = length & TXBUF_FRMLEN_MASK;
	ctrl |= MACB_BIT(TX_LAST);
	if (tx_head == (MACB_TX_RING_SIZE - 1)) {
		ctrl |= MACB_BIT(TX_WRAP);
		macb->tx_head = 0;
	} else {
		macb->tx_head++;
	}

	/* With 64-bit descriptors each logical slot spans two ring entries */
	if (macb->config->hw_dma_cap & HW_DMA_CAP_64B)
		tx_head = tx_head * 2;

	macb->tx_ring[tx_head].ctrl = ctrl;
	macb_set_addr(macb, &macb->tx_ring[tx_head], paddr);

	/* Make the descriptor visible to the DMA engine before starting TX */
	barrier();
	macb_flush_ring_desc(macb, TX);
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));

	/*
	 * I guess this is necessary because the networking core may
	 * re-use the transmit buffer as soon as we return...
	 */
	for (i = 0; i <= MACB_TX_TIMEOUT; i++) {
		barrier();
		macb_invalidate_ring_desc(macb, TX);
		ctrl = macb->tx_ring[tx_head].ctrl;
		/* TX_USED set by hardware means the frame has been consumed */
		if (ctrl & MACB_BIT(TX_USED))
			break;
		udelay(1);
	}

	dma_unmap_single(paddr, length, DMA_TO_DEVICE);

	if (i <= MACB_TX_TIMEOUT) {
		/* Completed: report any error bits the controller latched */
		if (ctrl & MACB_BIT(TX_UNDERRUN))
			printf("%s: TX underrun\n", name);
		if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
			printf("%s: TX buffers exhausted in mid frame\n", name);
	} else {
		printf("%s: TX timeout\n", name);
	}

	/* No one cares anyway */
	return 0;
}
391
/*
 * Return RX descriptor @idx to the hardware by clearing its RX_USED bit,
 * deferred until a whole cacheline's worth of descriptors can be done at
 * once (see the comment below for why).
 */
static void reclaim_rx_buffer(struct macb_device *macb,
			      unsigned int idx)
{
	unsigned int mask;
	unsigned int shift;
	unsigned int i;

	/*
	 * There may be multiple descriptors per CPU cacheline,
	 * so a cache flush would flush the whole line, meaning the content of other descriptors
	 * in the cacheline would also flush. If one of the other descriptors had been
	 * written to by the controller, the flush would cause those changes to be lost.
	 *
	 * To circumvent this issue, we do the actual freeing only when we need to free
	 * the last descriptor in the current cacheline. When the current descriptor is the
	 * last in the cacheline, we free all the descriptors that belong to that cacheline.
	 */
	if (macb->config->hw_dma_cap & HW_DMA_CAP_64B) {
		/* 64-bit mode: two ring entries per logical descriptor */
		mask = DESC_PER_CACHELINE_64 - 1;
		shift = 1;
	} else {
		mask = DESC_PER_CACHELINE_32 - 1;
		shift = 0;
	}

	/* we exit without freeing if idx is not the last descriptor in the cacheline */
	if ((idx & mask) != mask)
		return;

	/* Hand back every descriptor in this cacheline in one go */
	for (i = idx & (~mask); i <= idx; i++)
		macb->rx_ring[i << shift].addr &= ~MACB_BIT(RX_USED);
}
424
/*
 * Return all RX descriptors from rx_tail up to (but excluding) @new_tail
 * to the hardware, handling ring wrap-around, then advance rx_tail.
 */
static void reclaim_rx_buffers(struct macb_device *macb,
			       unsigned int new_tail)
{
	unsigned int i;

	i = macb->rx_tail;

	/* Pick up the latest hardware state before rewriting descriptors */
	macb_invalidate_ring_desc(macb, RX);
	/* First leg: from rx_tail to the end of the ring (wrap case) */
	while (i > new_tail) {
		reclaim_rx_buffer(macb, i);
		i++;
		if (i >= MACB_RX_RING_SIZE)
			i = 0;
	}

	/* Second leg: from the ring start (or rx_tail) up to new_tail */
	while (i < new_tail) {
		reclaim_rx_buffer(macb, i);
		i++;
	}

	barrier();
	macb_flush_ring_desc(macb, RX);
	macb->rx_tail = new_tail;
}
449
/*
 * Receive one frame.  Scans descriptors from next_rx_tail looking for a
 * SOF..EOF span; returns the frame length with *packetp pointing either
 * straight into the RX buffer area or, for frames that wrap past the end
 * of the ring, into net_rx_packets[0] after reassembly.  Returns -EAGAIN
 * when no complete frame is available.
 *
 * In 64-bit DMA mode each logical descriptor occupies two ring entries,
 * hence the *2 / /2 juggling of next_rx_tail below; @flag tracks whether
 * the index has already been scaled back in the SOF branch.
 */
static int _macb_recv(struct macb_device *macb, uchar **packetp)
{
	unsigned int next_rx_tail = macb->next_rx_tail;
	void *buffer;
	int length;
	u32 status;
	u8 flag = false;

	macb->wrapped = false;
	for (;;) {
		macb_invalidate_ring_desc(macb, RX);

		if (macb->config->hw_dma_cap & HW_DMA_CAP_64B)
			next_rx_tail = next_rx_tail * 2;

		/* RX_USED clear means the hardware still owns this slot */
		if (!(macb->rx_ring[next_rx_tail].addr & MACB_BIT(RX_USED)))
			return -EAGAIN;

		status = macb->rx_ring[next_rx_tail].ctrl;
		if (status & MACB_BIT(RX_SOF)) {
			if (macb->config->hw_dma_cap & HW_DMA_CAP_64B) {
				next_rx_tail = next_rx_tail / 2;
				flag = true;
			}

			/* Drop any partial frame seen before this new SOF */
			if (next_rx_tail != macb->rx_tail)
				reclaim_rx_buffers(macb, next_rx_tail);
			macb->wrapped = false;
		}

		if (status & MACB_BIT(RX_EOF)) {
			buffer = macb->rx_buffer +
				macb->rx_buffer_size * macb->rx_tail;
			length = status & RXBUF_FRMLEN_MASK;

			macb_invalidate_rx_buffer(macb);
			if (macb->wrapped) {
				/* Frame crosses the ring end: copy both halves */
				unsigned int headlen, taillen;

				headlen = macb->rx_buffer_size *
					(MACB_RX_RING_SIZE - macb->rx_tail);
				taillen = length - headlen;
				memcpy((void *)net_rx_packets[0],
				       buffer, headlen);
				memcpy((void *)net_rx_packets[0] + headlen,
				       macb->rx_buffer, taillen);
				*packetp = (void *)net_rx_packets[0];
			} else {
				*packetp = buffer;
			}

			if (macb->config->hw_dma_cap & HW_DMA_CAP_64B) {
				if (!flag)
					next_rx_tail = next_rx_tail / 2;
			}

			if (++next_rx_tail >= MACB_RX_RING_SIZE)
				next_rx_tail = 0;
			macb->next_rx_tail = next_rx_tail;
			return length;
		} else {
			if (macb->config->hw_dma_cap & HW_DMA_CAP_64B) {
				if (!flag)
					next_rx_tail = next_rx_tail / 2;
				flag = false;
			}

			if (++next_rx_tail >= MACB_RX_RING_SIZE) {
				macb->wrapped = true;
				next_rx_tail = 0;
			}
		}
		barrier();
	}
}
525
/*
 * (Re)start autonegotiation on the attached PHY: advertise all 10/100
 * modes, then poll BMSR (100us steps, up to MACB_AUTONEG_TIMEOUT us)
 * for completion.  Result is only reported via printf.
 */
static void macb_phy_reset(struct macb_device *macb, const char *name)
{
	int i;
	u16 status, adv;

	adv = ADVERTISE_CSMA | ADVERTISE_ALL;
	macb_mdio_write(macb, macb->phy_addr, MII_ADVERTISE, adv);
	printf("%s: Starting autonegotiation...\n", name);
	macb_mdio_write(macb, macb->phy_addr, MII_BMCR, (BMCR_ANENABLE
					 | BMCR_ANRESTART));

	for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
		status = macb_mdio_read(macb, macb->phy_addr, MII_BMSR);
		if (status & BMSR_ANEGCOMPLETE)
			break;
		udelay(100);
	}

	/* status holds the last BMSR value read inside the loop */
	if (status & BMSR_ANEGCOMPLETE)
		printf("%s: Autonegotiation complete\n", name);
	else
		printf("%s: Autonegotiation timed out (status=0x%04x)\n",
		       name, status);
}
550
/*
 * Probe for a PHY: try the configured phy_addr first, then scan all 32
 * MDIO addresses, updating macb->phy_addr to the first responding one.
 * Return: 0 when a PHY answers, -ENODEV otherwise.
 *
 * NOTE(review): on failure macb->phy_addr is left at the last scanned
 * address (31) rather than restored -- callers bail out anyway.
 */
static int macb_phy_find(struct macb_device *macb, const char *name)
{
	int i;
	u16 phy_id;

	/* 0xffff from PHYSID1 means nothing is driving the bus at that address */
	phy_id = macb_mdio_read(macb, macb->phy_addr, MII_PHYSID1);
	if (phy_id != 0xffff) {
		printf("%s: PHY present at %d\n", name, macb->phy_addr);
		return 0;
	}

	/* Search for PHY... */
	for (i = 0; i < 32; i++) {
		macb->phy_addr = i;
		phy_id = macb_mdio_read(macb, macb->phy_addr, MII_PHYSID1);
		if (phy_id != 0xffff) {
			printf("%s: PHY present at %d\n", name, i);
			return 0;
		}
	}

	/* PHY isn't up to snuff */
	printf("%s: PHY not found\n", name);

	return -ENODEV;
}
577
/**
 * macb_linkspd_cb - Linkspeed change callback function
 * @dev/@regs:	MACB udevice (DM version) or
 *		Base Register of MACB devices (non-DM version)
 * @speed:	Linkspeed
 * Returns 0 when operation success and negative errno number
 * when operation failed.
 *
 * NOTE: this comment documents macb_linkspd_cb() defined further below,
 * not the clock-init helpers that immediately follow it.
 */
/*
 * SiFive FU540 clk_init hook: program the GEMGXL TX clock mux (a single
 * register mapped at address index 1) according to the requested rate.
 * Return: 0 on success, -ENODEV if the mux register cannot be mapped.
 */
static int macb_sifive_clk_init(struct udevice *dev, ulong rate)
{
	void *gemgxl_regs;

	gemgxl_regs = dev_read_addr_index_ptr(dev, 1);
	if (!gemgxl_regs)
		return -ENODEV;

	/*
	 * SiFive GEMGXL TX clock operation mode:
	 *
	 * 0 = GMII mode. Use 125 MHz gemgxlclk from PRCI in TX logic
	 *     and output clock on GMII output signal GTX_CLK
	 * 1 = MII mode. Use MII input signal TX_CLK in TX logic
	 */
	writel(rate != 125000000, gemgxl_regs);
	return 0;
}
604
605static int macb_sama7g5_clk_init(struct udevice *dev, ulong rate)
606{
607	struct clk clk;
608	int ret;
609
610	ret = clk_get_by_name(dev, "tx_clk", &clk);
611	if (ret)
612		return ret;
613
614	/*
615	 * This is for using GCK. Clock rate is addressed via assigned-clock
616	 * property, so only clock enable is needed here. The switching to
617	 * proper clock rate depending on link speed is managed by IP logic.
618	 */
619	return clk_enable(&clk);
620}
621
/* See the kernel-doc block above macb_sifive_clk_init() for the contract. */
int __weak macb_linkspd_cb(struct udevice *dev, unsigned int speed)
{
#ifdef CONFIG_CLK
	struct macb_device *macb = dev_get_priv(dev);
	struct clk tx_clk;
	ulong rate;
	int ret;

	/* Map link speed to the required TX clock rate */
	switch (speed) {
	case _10BASET:
		rate = 2500000;		/* 2.5 MHz */
		break;
	case _100BASET:
		rate = 25000000;	/* 25 MHz */
		break;
	case _1000BASET:
		rate = 125000000;	/* 125 MHz */
		break;
	default:
		/* does not change anything */
		return 0;
	}

	/* A platform-specific hook takes precedence over the generic path */
	if (macb->config->clk_init)
		return macb->config->clk_init(dev, rate);

	/*
	 * "tx_clk" is an optional clock source for MACB.
	 * Ignore if it does not exist in DT.
	 */
	ret = clk_get_by_name(dev, "tx_clk", &tx_clk);
	if (ret)
		return 0;

	if (tx_clk.dev) {
		ret = clk_set_rate(&tx_clk, rate);
		if (ret < 0)
			return ret;
	}
#endif

	return 0;
}
665
/*
 * Bring up the link: find and configure the PHY (or use the fixed-link
 * settings), wait for link, then program NCFGR speed/duplex bits and
 * invoke macb_linkspd_cb() to retune the TX clock.
 * Return: 0 on success, -ENODEV if no PHY answers, -ENETDOWN if the
 * link never comes up, or an error from macb_linkspd_cb().
 */
static int macb_phy_init(struct udevice *dev, const char *name)
{
	struct macb_device *macb = dev_get_priv(dev);
	u32 ncfgr;
	u16 phy_id, status, adv, lpa;
	int media, speed, duplex;
	int ret;
	int i;

	arch_get_mdio_control(name);
	/* If port is not fixed -> setup PHY */
	if (!macb_port_is_fixed_link(macb)) {
		/* Auto-detect phy_addr */
		ret = macb_phy_find(macb, name);
		if (ret)
			return ret;

		/* Check if the PHY is up to snuff... */
		phy_id = macb_mdio_read(macb, macb->phy_addr, MII_PHYSID1);
		if (phy_id == 0xffff) {
			printf("%s: No PHY present\n", name);
			return -ENODEV;
		}

#ifdef CONFIG_PHYLIB
		macb->phydev = phy_connect(macb->bus, macb->phy_addr, dev,
				     macb->phy_interface);
		if (!macb->phydev) {
			printf("phy_connect failed\n");
			return -ENODEV;
		}

		phy_config(macb->phydev);
#endif

		status = macb_mdio_read(macb, macb->phy_addr, MII_BMSR);
		if (!(status & BMSR_LSTATUS)) {
			/* Try to re-negotiate if we don't have link already. */
			macb_phy_reset(macb, name);

			/* Poll for link-up, 100us per step */
			for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
				status = macb_mdio_read(macb, macb->phy_addr, MII_BMSR);
				if (status & BMSR_LSTATUS) {
					/*
					 * Delay a bit after the link is established,
					 * so that the next xfer does not fail
					 */
					mdelay(10);
					break;
				}
				udelay(100);
			}
		}

		if (!(status & BMSR_LSTATUS)) {
			printf("%s: link down (status: 0x%04x)\n",
			       name, status);
			return -ENETDOWN;
		}

		/* First check for GMAC and that it is GiB capable */
		if (gem_is_gigabit_capable(macb)) {
			lpa = macb_mdio_read(macb, macb->phy_addr, MII_STAT1000);

			if (lpa & (LPA_1000FULL | LPA_1000HALF | LPA_1000XFULL |
						LPA_1000XHALF)) {
				duplex = ((lpa & (LPA_1000FULL | LPA_1000XFULL)) ?
						1 : 0);

				printf("%s: link up, 1000Mbps %s-duplex (lpa: 0x%04x)\n",
					name,
					duplex ? "full" : "half",
					lpa);

				/* Gigabit path: set GBE, clear SPD, set FD as needed */
				ncfgr = macb_readl(macb, NCFGR);
				ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
				ncfgr |= GEM_BIT(GBE);

				if (duplex)
					ncfgr |= MACB_BIT(FD);

				macb_writel(macb, NCFGR, ncfgr);

				ret = macb_linkspd_cb(dev, _1000BASET);
				if (ret)
					return ret;

				return 0;
			}
		}

		/* fall back for EMAC checking */
		adv = macb_mdio_read(macb, macb->phy_addr, MII_ADVERTISE);
		lpa = macb_mdio_read(macb, macb->phy_addr, MII_LPA);
		media = mii_nway_result(lpa & adv);
		speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
			? 1 : 0);
		duplex = (media & ADVERTISE_FULL) ? 1 : 0;
		printf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
			name,
			speed ? "100" : "10",
			duplex ? "full" : "half",
			lpa);
	} else {
		/* if macb port is a fixed link */
		/* TODO : manage gigabit capable processors */
		speed = macb->speed;
		duplex = macb->duplex;
		printf("%s: link up, %sMbps %s-duplex\n",
			name,
			speed ? "100" : "10",
			duplex ? "full" : "half");
	}

	/* 10/100 path: program SPD/FD, clear any stale GBE bit */
	ncfgr = macb_readl(macb, NCFGR);
	ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | GEM_BIT(GBE));
	if (speed) {
		ncfgr |= MACB_BIT(SPD);
		ret = macb_linkspd_cb(dev, _100BASET);
	} else {
		ret = macb_linkspd_cb(dev, _10BASET);
	}

	if (ret)
		return ret;

	if (duplex)
		ncfgr |= MACB_BIT(FD);
	macb_writel(macb, NCFGR, ncfgr);

	return 0;
}
798
/*
 * Park every GEM queue other than queue 0 on a single shared dummy TX
 * descriptor (TX_USED set, NULL buffer) so the hardware has somewhere
 * valid to point.  Always returns 0.
 */
static int gmac_init_multi_queues(struct macb_device *macb)
{
	int i, num_queues = 1;
	u32 queue_mask;
	unsigned long paddr;

	/* bit 0 is never set but queue 0 always exists */
	queue_mask = gem_readl(macb, DCFG6) & 0xff;
	queue_mask |= 0x1;

	for (i = 1; i < MACB_MAX_QUEUES; i++)
		if (queue_mask & (1 << i))
			num_queues++;

	/* Build the dummy descriptor and flush it for the DMA engine */
	macb->dummy_desc->ctrl = MACB_BIT(TX_USED);
	macb->dummy_desc->addr = 0;
	flush_dcache_range(macb->dummy_desc_dma, macb->dummy_desc_dma +
			ALIGN(MACB_TX_DUMMY_DMA_DESC_SIZE, PKTALIGN));
	paddr = macb->dummy_desc_dma;

	/* Point every extra queue's TX and RX base at the dummy descriptor */
	for (i = 1; i < num_queues; i++) {
		gem_writel_queue_TBQP(macb, lower_32_bits(paddr), i - 1);
		gem_writel_queue_RBQP(macb, lower_32_bits(paddr), i - 1);
		if (macb->config->hw_dma_cap & HW_DMA_CAP_64B) {
			gem_writel_queue_TBQPH(macb, upper_32_bits(paddr),
					       i - 1);
			gem_writel_queue_RBQPH(macb, upper_32_bits(paddr),
					       i - 1);
		}
	}
	return 0;
}
831
/*
 * Program the GEM DMA configuration register: RX buffer size, burst
 * length, packet-buffer mode, descriptor endianness and 64-bit
 * addressing, based on macb->config and CPU endianness.
 */
static void gmac_configure_dma(struct macb_device *macb)
{
	u32 buffer_size;
	u32 dmacfg;

	/* RXBS is expressed in units of RX_BUFFER_MULTIPLE (64) bytes */
	buffer_size = macb->rx_buffer_size / RX_BUFFER_MULTIPLE;
	dmacfg = gem_readl(macb, DMACFG) & ~GEM_BF(RXBS, -1L);
	dmacfg |= GEM_BF(RXBS, buffer_size);

	if (macb->config->dma_burst_length)
		dmacfg = GEM_BFINS(FBLDO,
				   macb->config->dma_burst_length, dmacfg);

	dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
	dmacfg &= ~GEM_BIT(ENDIA_PKT);

	if (macb->is_big_endian)
		dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
	else
		dmacfg &= ~GEM_BIT(ENDIA_DESC);

	dmacfg &= ~GEM_BIT(ADDR64);
	if (macb->config->hw_dma_cap & HW_DMA_CAP_64B)
		dmacfg |= GEM_BIT(ADDR64);

	gem_writel(macb, DMACFG, dmacfg);
}
859
/*
 * Full controller initialization: build the RX/TX descriptor rings,
 * program ring base registers, configure DMA and USRIO for the selected
 * PHY interface, bring up the PHY via macb_phy_init(), then enable
 * TX and RX.  Return: 0 on success or the error from macb_phy_init().
 */
static int _macb_init(struct udevice *dev, const char *name)
{
	struct macb_device *macb = dev_get_priv(dev);
	unsigned int val = 0;
	unsigned long paddr;
	int ret;
	int i;
	int count;

	/*
	 * macb_halt should have been called at some point before now,
	 * so we'll assume the controller is idle.
	 */

	/* initialize DMA descriptors */
	paddr = macb->rx_buffer_dma;
	for (i = 0; i < MACB_RX_RING_SIZE; i++) {
		if (i == (MACB_RX_RING_SIZE - 1))
			paddr |= MACB_BIT(RX_WRAP);
		/* 64-bit descriptors occupy two ring entries each */
		if (macb->config->hw_dma_cap & HW_DMA_CAP_64B)
			count = i * 2;
		else
			count = i;
		macb->rx_ring[count].ctrl = 0;
		macb_set_addr(macb, &macb->rx_ring[count], paddr);
		paddr += macb->rx_buffer_size;
	}
	macb_flush_ring_desc(macb, RX);
	macb_flush_rx_buffer(macb);

	/* All TX descriptors start owned by software (TX_USED set) */
	for (i = 0; i < MACB_TX_RING_SIZE; i++) {
		if (macb->config->hw_dma_cap & HW_DMA_CAP_64B)
			count = i * 2;
		else
			count = i;
		macb_set_addr(macb, &macb->tx_ring[count], 0);
		if (i == (MACB_TX_RING_SIZE - 1))
			macb->tx_ring[count].ctrl = MACB_BIT(TX_USED) |
				MACB_BIT(TX_WRAP);
		else
			macb->tx_ring[count].ctrl = MACB_BIT(TX_USED);
	}
	macb_flush_ring_desc(macb, TX);

	macb->rx_tail = 0;
	macb->tx_head = 0;
	macb->tx_tail = 0;
	macb->next_rx_tail = 0;

#ifdef CONFIG_MACB_ZYNQ
	gem_writel(macb, DMACFG, MACB_ZYNQ_GEM_DMACR_INIT);
#endif

	/* Tell the controller where the rings live */
	macb_writel(macb, RBQP, lower_32_bits(macb->rx_ring_dma));
	macb_writel(macb, TBQP, lower_32_bits(macb->tx_ring_dma));
	if (macb->config->hw_dma_cap & HW_DMA_CAP_64B) {
		macb_writel(macb, RBQPH, upper_32_bits(macb->rx_ring_dma));
		macb_writel(macb, TBQPH, upper_32_bits(macb->tx_ring_dma));
	}

	if (macb_is_gem(macb)) {
		/* Initialize DMA properties */
		gmac_configure_dma(macb);
		/* Check the multi queue and initialize the queue for tx */
		gmac_init_multi_queues(macb);

		/*
		 * When the GMAC IP with GE feature, this bit is used to
		 * select interface between RGMII and GMII.
		 * When the GMAC IP without GE feature, this bit is used
		 * to select interface between RMII and MII.
		 */
		if (macb->phy_interface == PHY_INTERFACE_MODE_RGMII ||
		    macb->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    macb->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
		    macb->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
			val = macb->config->usrio->rgmii;
		else if (macb->phy_interface == PHY_INTERFACE_MODE_RMII)
			val = macb->config->usrio->rmii;
		else if (macb->phy_interface == PHY_INTERFACE_MODE_MII)
			val = macb->config->usrio->mii;

		if (macb->config->caps & MACB_CAPS_USRIO_HAS_CLKEN)
			val |= macb->config->usrio->clken;

		gem_writel(macb, USRIO, val);

		if (macb->phy_interface == PHY_INTERFACE_MODE_SGMII) {
			unsigned int ncfgr = macb_readl(macb, NCFGR);

			/* SGMII needs the PCS enabled as well */
			ncfgr |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
			macb_writel(macb, NCFGR, ncfgr);
		}
	} else {
	/* choose RMII or MII mode. This depends on the board */
#ifdef CONFIG_AT91FAMILY
		if (macb->phy_interface == PHY_INTERFACE_MODE_RMII) {
			macb_writel(macb, USRIO,
				    macb->config->usrio->rmii |
				    macb->config->usrio->clken);
		} else {
			macb_writel(macb, USRIO, macb->config->usrio->clken);
		}
#else
		if (macb->phy_interface == PHY_INTERFACE_MODE_RMII)
			macb_writel(macb, USRIO, 0);
		else
			macb_writel(macb, USRIO, macb->config->usrio->mii);
#endif
	}

	ret = macb_phy_init(dev, name);
	if (ret)
		return ret;

	/* Enable TX and RX */
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE));

	return 0;
}
980
/*
 * Stop the controller: halt TX, busy-wait for the transmitter to go
 * idle (TGO clear), then disable TX/RX and clear the statistics.
 */
static void _macb_halt(struct macb_device *macb)
{
	u32 ncr, tsr;

	/* Halt the controller and wait for any ongoing transmission to end. */
	ncr = macb_readl(macb, NCR);
	ncr |= MACB_BIT(THALT);
	macb_writel(macb, NCR, ncr);

	do {
		tsr = macb_readl(macb, TSR);
	} while (tsr & MACB_BIT(TGO));

	/* Disable TX and RX, and clear statistics */
	macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
}
997
998static int _macb_write_hwaddr(struct macb_device *macb, unsigned char *enetaddr)
999{
1000	u32 hwaddr_bottom;
1001	u16 hwaddr_top;
1002
1003	/* set hardware address */
1004	hwaddr_bottom = enetaddr[0] | enetaddr[1] << 8 |
1005			enetaddr[2] << 16 | enetaddr[3] << 24;
1006	macb_writel(macb, SA1B, hwaddr_bottom);
1007	hwaddr_top = enetaddr[4] | enetaddr[5] << 8;
1008	macb_writel(macb, SA1T, hwaddr_top);
1009	return 0;
1010}
1011
1012static u32 macb_mdc_clk_div(int id, struct macb_device *macb)
1013{
1014	u32 config;
1015#if defined(CONFIG_CLK)
1016	unsigned long macb_hz = macb->pclk_rate;
1017#else
1018	unsigned long macb_hz = get_macb_pclk_rate(id);
1019#endif
1020
1021	if (macb_hz < 20000000)
1022		config = MACB_BF(CLK, MACB_CLK_DIV8);
1023	else if (macb_hz < 40000000)
1024		config = MACB_BF(CLK, MACB_CLK_DIV16);
1025	else if (macb_hz < 80000000)
1026		config = MACB_BF(CLK, MACB_CLK_DIV32);
1027	else
1028		config = MACB_BF(CLK, MACB_CLK_DIV64);
1029
1030	return config;
1031}
1032
1033static u32 gem_mdc_clk_div(int id, struct macb_device *macb)
1034{
1035	u32 config;
1036
1037#if defined(CONFIG_CLK)
1038	unsigned long macb_hz = macb->pclk_rate;
1039#else
1040	unsigned long macb_hz = get_macb_pclk_rate(id);
1041#endif
1042
1043	if (macb_hz < 20000000)
1044		config = GEM_BF(CLK, GEM_CLK_DIV8);
1045	else if (macb_hz < 40000000)
1046		config = GEM_BF(CLK, GEM_CLK_DIV16);
1047	else if (macb_hz < 80000000)
1048		config = GEM_BF(CLK, GEM_CLK_DIV32);
1049	else if (macb_hz < 120000000)
1050		config = GEM_BF(CLK, GEM_CLK_DIV48);
1051	else if (macb_hz < 160000000)
1052		config = GEM_BF(CLK, GEM_CLK_DIV64);
1053	else if (macb_hz < 240000000)
1054		config = GEM_BF(CLK, GEM_CLK_DIV96);
1055	else if (macb_hz < 320000000)
1056		config = GEM_BF(CLK, GEM_CLK_DIV128);
1057	else
1058		config = GEM_BF(CLK, GEM_CLK_DIV224);
1059
1060	return config;
1061}
1062
1063/*
1064 * Get the DMA bus width field of the network configuration register that we
1065 * should program. We find the width from decoding the design configuration
1066 * register to find the maximum supported data bus width.
1067 */
1068static u32 macb_dbw(struct macb_device *macb)
1069{
1070	switch (GEM_BFEXT(DBWDEF, gem_readl(macb, DCFG1))) {
1071	case 4:
1072		return GEM_BF(DBW, GEM_DBW128);
1073	case 2:
1074		return GEM_BF(DBW, GEM_DBW64);
1075	case 1:
1076	default:
1077		return GEM_BF(DBW, GEM_DBW32);
1078	}
1079}
1080
/*
 * One-time setup: allocate the DMA-coherent RX buffer, descriptor rings
 * and dummy descriptor, then program an initial NCFGR (MDC divider and,
 * for GEM, data bus width) so the PHY can be talked to.
 *
 * NOTE(review): the dma_alloc_coherent() results are not checked for
 * NULL here -- presumably allocation failure is fatal this early; worth
 * confirming on memory-constrained boards.
 */
static void _macb_eth_initialize(struct macb_device *macb)
{
	int id = 0;	/* This is not used by functions we call */
	u32 ncfgr;

	/* GEM uses larger RX buffers than the original MACB core */
	if (macb_is_gem(macb))
		macb->rx_buffer_size = GEM_RX_BUFFER_SIZE;
	else
		macb->rx_buffer_size = MACB_RX_BUFFER_SIZE;

	/* TODO: we need check the rx/tx_ring_dma is dcache line aligned */
	macb->rx_buffer = dma_alloc_coherent(macb->rx_buffer_size *
					     MACB_RX_RING_SIZE,
					     &macb->rx_buffer_dma);
	macb->rx_ring = dma_alloc_coherent(MACB_RX_DMA_DESC_SIZE,
					   &macb->rx_ring_dma);
	macb->tx_ring = dma_alloc_coherent(MACB_TX_DMA_DESC_SIZE,
					   &macb->tx_ring_dma);
	macb->dummy_desc = dma_alloc_coherent(MACB_TX_DUMMY_DMA_DESC_SIZE,
					   &macb->dummy_desc_dma);

	/*
	 * Do some basic initialization so that we at least can talk
	 * to the PHY
	 */
	if (macb_is_gem(macb)) {
		ncfgr = gem_mdc_clk_div(id, macb);
		ncfgr |= macb_dbw(macb);
	} else {
		ncfgr = macb_mdc_clk_div(id, macb);
	}

	macb_writel(macb, NCFGR, ncfgr);
}
1115
1116static int macb_start(struct udevice *dev)
1117{
1118	return _macb_init(dev, dev->name);
1119}
1120
1121static int macb_send(struct udevice *dev, void *packet, int length)
1122{
1123	struct macb_device *macb = dev_get_priv(dev);
1124
1125	return _macb_send(macb, dev->name, packet, length);
1126}
1127
1128static int macb_recv(struct udevice *dev, int flags, uchar **packetp)
1129{
1130	struct macb_device *macb = dev_get_priv(dev);
1131
1132	macb->next_rx_tail = macb->rx_tail;
1133	macb->wrapped = false;
1134
1135	return _macb_recv(macb, packetp);
1136}
1137
1138static int macb_free_pkt(struct udevice *dev, uchar *packet, int length)
1139{
1140	struct macb_device *macb = dev_get_priv(dev);
1141
1142	reclaim_rx_buffers(macb, macb->next_rx_tail);
1143
1144	return 0;
1145}
1146
/* eth_ops.stop: halt the controller. */
static void macb_stop(struct udevice *dev)
{
	_macb_halt(dev_get_priv(dev));
}
1153
1154static int macb_write_hwaddr(struct udevice *dev)
1155{
1156	struct eth_pdata *plat = dev_get_plat(dev);
1157	struct macb_device *macb = dev_get_priv(dev);
1158
1159	return _macb_write_hwaddr(macb, plat->enetaddr);
1160}
1161
/* Driver-model ethernet operations; thin wrappers around the _macb_*() core */
static const struct eth_ops macb_eth_ops = {
	.start	= macb_start,
	.send	= macb_send,
	.recv	= macb_recv,
	.stop	= macb_stop,
	.free_pkt	= macb_free_pkt,
	.write_hwaddr	= macb_write_hwaddr,
};
1170
#ifdef CONFIG_CLK
/*
 * macb_enable_clk() - enable the peripheral clock and cache its rate
 * @dev:	MACB udevice
 *
 * Looks up clock index 0 from the device tree, enables it and stores
 * its rate in macb->pclk_rate for later MDC divider computation.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int macb_enable_clk(struct udevice *dev)
{
	struct macb_device *macb = dev_get_priv(dev);
	struct clk clk;
	ulong clk_rate;
	int ret;

	ret = clk_get_by_index(dev, 0, &clk);
	if (ret)
		return ret;	/* propagate the real error, not a generic -EINVAL */

	/*
	 * If clock driver didn't support enable or disable then
	 * we get -ENOSYS from clk_enable(). To handle this, we
	 * don't fail for ret == -ENOSYS.
	 */
	ret = clk_enable(&clk);
	if (ret && ret != -ENOSYS)
		return ret;

	/* A zero rate would make the MDC divider math meaningless */
	clk_rate = clk_get_rate(&clk);
	if (!clk_rate)
		return -EINVAL;

	macb->pclk_rate = clk_rate;

	return 0;
}
#endif
1201
/* USRIO bit layout for the classic MACB/GEM IP (per-mode bit masks) */
static const struct macb_usrio_cfg macb_default_usrio = {
	.mii = MACB_BIT(MII),
	.rmii = MACB_BIT(RMII),
	.rgmii = GEM_BIT(RGMII),
	.clken = MACB_BIT(CLKEN),
};
1208
/*
 * Fallback config when the compatible entry carries no .data.
 * Deliberately not const: probe may upgrade hw_dma_cap to 64-bit
 * when the hardware reports DAW64 capability.
 */
static struct macb_config default_gem_config = {
	.dma_burst_length = 16,
	.hw_dma_cap = HW_DMA_CAP_32B,
	.clk_init = NULL,
	.usrio = &macb_default_usrio,
};
1215
1216static int macb_eth_probe(struct udevice *dev)
1217{
1218	struct eth_pdata *pdata = dev_get_plat(dev);
1219	struct macb_device *macb = dev_get_priv(dev);
1220	struct ofnode_phandle_args phandle_args;
1221	int ret;
1222
1223	macb->phy_interface = dev_read_phy_mode(dev);
1224	if (macb->phy_interface == PHY_INTERFACE_MODE_NA)
1225		return -EINVAL;
1226
1227	/* Read phyaddr from DT */
1228	if (!dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
1229					&phandle_args))
1230		macb->phy_addr = ofnode_read_u32_default(phandle_args.node,
1231							 "reg", -1);
1232
1233	macb->regs = (void *)(uintptr_t)pdata->iobase;
1234
1235	macb->is_big_endian = (cpu_to_be32(0x12345678) == 0x12345678);
1236
1237	macb->config = (struct macb_config *)dev_get_driver_data(dev);
1238	if (!macb->config) {
1239		if (IS_ENABLED(CONFIG_DMA_ADDR_T_64BIT)) {
1240			if (GEM_BFEXT(DAW64, gem_readl(macb, DCFG6)))
1241				default_gem_config.hw_dma_cap = HW_DMA_CAP_64B;
1242		}
1243		macb->config = &default_gem_config;
1244	}
1245
1246#ifdef CONFIG_CLK
1247	ret = macb_enable_clk(dev);
1248	if (ret)
1249		return ret;
1250#endif
1251
1252	_macb_eth_initialize(macb);
1253
1254#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
1255	macb->bus = mdio_alloc();
1256	if (!macb->bus)
1257		return -ENOMEM;
1258	strlcpy(macb->bus->name, dev->name, MDIO_NAME_LEN);
1259	macb->bus->read = macb_miiphy_read;
1260	macb->bus->write = macb_miiphy_write;
1261
1262	ret = mdio_register(macb->bus);
1263	if (ret < 0)
1264		return ret;
1265	macb->bus = miiphy_get_dev_by_name(dev->name);
1266#endif
1267
1268	return 0;
1269}
1270
/*
 * macb_eth_remove() - driver-model remove: release PHY and MDIO resources.
 *
 * NOTE(review): when neither CONFIG_CMD_MII nor CONFIG_PHYLIB is set,
 * macb->bus is never allocated and stays NULL here — presumably
 * mdio_unregister()/mdio_free() tolerate a NULL bus; verify in
 * common/miiphyutil.c.
 */
static int macb_eth_remove(struct udevice *dev)
{
	struct macb_device *macb = dev_get_priv(dev);

#ifdef CONFIG_PHYLIB
	free(macb->phydev);
#endif
	/* Unregister before freeing: mdio_free() releases the memory */
	mdio_unregister(macb->bus);
	mdio_free(macb->bus);

	return 0;
}
1283
1284/**
1285 * macb_late_eth_of_to_plat
1286 * @dev:	udevice struct
1287 * Returns 0 when operation success and negative errno number
1288 * when operation failed.
1289 */
1290int __weak macb_late_eth_of_to_plat(struct udevice *dev)
1291{
1292	return 0;
1293}
1294
1295static int macb_eth_of_to_plat(struct udevice *dev)
1296{
1297	struct eth_pdata *pdata = dev_get_plat(dev);
1298	struct macb_device *macb = dev_get_priv(dev);
1299	void *blob = (void *)gd->fdt_blob;
1300	int node = dev_of_offset(dev);
1301	int fl_node, speed_fdt;
1302
1303	/* fetch 'fixed-link' property */
1304	fl_node = fdt_subnode_offset(blob, node, "fixed-link");
1305	if (fl_node >= 0) {
1306		/* set phy_addr to invalid value for fixed link */
1307		macb->phy_addr = PHY_MAX_ADDR + 1;
1308		macb->duplex = fdtdec_get_bool(blob, fl_node, "full-duplex");
1309		speed_fdt = fdtdec_get_int(blob, fl_node, "speed", 0);
1310		if (speed_fdt == 100) {
1311			macb->speed = 1;
1312		} else if (speed_fdt == 10) {
1313			macb->speed = 0;
1314		} else {
1315			printf("%s: The given speed %d of ethernet in the DT is not supported\n",
1316					__func__, speed_fdt);
1317			return -EINVAL;
1318		}
1319	}
1320
1321	pdata->iobase = (uintptr_t)dev_remap_addr(dev);
1322	if (!pdata->iobase)
1323		return -EINVAL;
1324
1325	return macb_late_eth_of_to_plat(dev);
1326}
1327
/* SAMA7G5 USRIO encoding: the PHY mode is a field value, not a bit mask */
static const struct macb_usrio_cfg sama7g5_usrio = {
	.mii = 0,
	.rmii = 1,
	.rgmii = 2,
	.clken = BIT(2),
};
1334
/* SAMA5D4 GEM: shorter DMA bursts than the generic GEM default */
static const struct macb_config sama5d4_config = {
	.dma_burst_length = 4,
	.hw_dma_cap = HW_DMA_CAP_32B,
	.clk_init = NULL,
	.usrio = &macb_default_usrio,
};
1341
/* SiFive FU540 GEM: needs its SoC-specific clock setup hook */
static const struct macb_config sifive_config = {
	.dma_burst_length = 16,
	.hw_dma_cap = HW_DMA_CAP_32B,
	.clk_init = macb_sifive_clk_init,
	.usrio = &macb_default_usrio,
};
1348
/* SAMA7G5 GMAC: SoC-specific clock init and USRIO encoding */
static const struct macb_config sama7g5_gmac_config = {
	.dma_burst_length = 16,
	.hw_dma_cap = HW_DMA_CAP_32B,
	.clk_init = macb_sama7g5_clk_init,
	.usrio = &sama7g5_usrio,
};
1355
/* SAMA7G5 EMAC: like the GMAC but with an explicit USRIO clock-enable bit */
static const struct macb_config sama7g5_emac_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN,
	.dma_burst_length = 16,
	.hw_dma_cap = HW_DMA_CAP_32B,
	.usrio = &sama7g5_usrio,
};
1362
/* Compatible strings; entries without .data fall back to default_gem_config */
static const struct udevice_id macb_eth_ids[] = {
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,at91sam9260-macb" },
	{ .compatible = "cdns,sam9x60-macb" },
	{ .compatible = "cdns,sama7g5-gem",
	  .data = (ulong)&sama7g5_gmac_config },
	{ .compatible = "cdns,sama7g5-emac",
	  .data = (ulong)&sama7g5_emac_config },
	{ .compatible = "atmel,sama5d2-gem" },
	{ .compatible = "atmel,sama5d3-gem" },
	{ .compatible = "atmel,sama5d4-gem", .data = (ulong)&sama5d4_config },
	{ .compatible = "cdns,zynq-gem" },
	{ .compatible = "sifive,fu540-c000-gem",
	  .data = (ulong)&sifive_config },
	{ }
};
1379
/* Driver-model registration for the Cadence MACB/GEM ethernet controller */
U_BOOT_DRIVER(eth_macb) = {
	.name	= "eth_macb",
	.id	= UCLASS_ETH,
	.of_match = macb_eth_ids,
	.of_to_plat = macb_eth_of_to_plat,
	.probe	= macb_eth_probe,
	.remove	= macb_eth_remove,
	.ops	= &macb_eth_ops,
	.priv_auto	= sizeof(struct macb_device),
	.plat_auto	= sizeof(struct eth_pdata),
};
1391#endif
1392