1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (C) 2005-2006 Atmel Corporation
4 */
5#include <common.h>
6#include <clk.h>
7#include <cpu_func.h>
8#include <dm.h>
9#include <log.h>
10#include <asm/global_data.h>
11#include <linux/delay.h>
12
13/*
14 * The u-boot networking stack is a little weird.  It seems like the
15 * networking core allocates receive buffers up front without any
16 * regard to the hardware that's supposed to actually receive those
17 * packets.
18 *
19 * The MACB receives packets into 128-byte receive buffers, so the
20 * buffers allocated by the core isn't very practical to use.  We'll
21 * allocate our own, but we need one such buffer in case a packet
22 * wraps around the DMA ring so that we have to copy it.
23 *
24 * Therefore, define CONFIG_SYS_RX_ETH_BUFFER to 1 in the board-specific
25 * configuration header.  This way, the core allocates one RX buffer
26 * and one TX buffer, each of which can hold a ethernet packet of
27 * maximum size.
28 *
29 * For some reason, the networking core unconditionally specifies a
30 * 32-byte packet "alignment" (which really should be called
31 * "padding").  MACB shouldn't need that, but we'll refrain from any
32 * core modifications here...
33 */
34
35#include <net.h>
36#include <malloc.h>
37#include <miiphy.h>
38
39#include <linux/mii.h>
40#include <asm/io.h>
41#include <linux/dma-mapping.h>
42#include <asm/arch/clk.h>
43#include <linux/errno.h>
44
45#include "macb.h"
46
47DECLARE_GLOBAL_DATA_PTR;
48
/*
 * These buffer sizes must be power of 2 and divisible
 * by RX_BUFFER_MULTIPLE
 */
#define MACB_RX_BUFFER_SIZE		128
#define GEM_RX_BUFFER_SIZE		2048
#define RX_BUFFER_MULTIPLE		64

#define MACB_RX_RING_SIZE		32
#define MACB_TX_RING_SIZE		16

/* Poll budgets, both in microseconds (autoneg is polled in 100us steps) */
#define MACB_TX_TIMEOUT		1000
#define MACB_AUTONEG_TIMEOUT	5000000
62
63#ifdef CONFIG_MACB_ZYNQ
64/* INCR4 AHB bursts */
65#define MACB_ZYNQ_GEM_DMACR_BLENGTH		0x00000004
66/* Use full configured addressable space (8 Kb) */
67#define MACB_ZYNQ_GEM_DMACR_RXSIZE		0x00000300
68/* Use full configured addressable space (4 Kb) */
69#define MACB_ZYNQ_GEM_DMACR_TXSIZE		0x00000400
70/* Set RXBUF with use of 128 byte */
71#define MACB_ZYNQ_GEM_DMACR_RXBUF		0x00020000
72#define MACB_ZYNQ_GEM_DMACR_INIT \
73				(MACB_ZYNQ_GEM_DMACR_BLENGTH | \
74				MACB_ZYNQ_GEM_DMACR_RXSIZE | \
75				MACB_ZYNQ_GEM_DMACR_TXSIZE | \
76				MACB_ZYNQ_GEM_DMACR_RXBUF)
77#endif
78
/* Hardware DMA descriptor: buffer address word plus control/status word */
struct macb_dma_desc {
	u32	addr;
	u32	ctrl;
};

/* Extension appended after each descriptor when 64-bit DMA is in use */
struct macb_dma_desc_64 {
	u32 addrh;	/* upper 32 bits of the buffer address */
	u32 unused;
};
88
/* Values for macb_config->hw_dma_cap: 32-bit vs 64-bit descriptor DMA */
#define HW_DMA_CAP_32B		0
#define HW_DMA_CAP_64B		1

/* Ring sizing uses the worst case: 32-bit pair plus 64-bit extension */
#define DMA_DESC_SIZE		16
#define DMA_DESC_BYTES(n)	((n) * DMA_DESC_SIZE)
#define MACB_TX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_TX_RING_SIZE))
#define MACB_RX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_RX_RING_SIZE))
#define MACB_TX_DUMMY_DMA_DESC_SIZE	(DMA_DESC_BYTES(1))

/* How many ring descriptors share one CPU cacheline */
#define DESC_PER_CACHELINE_32	(ARCH_DMA_MINALIGN/sizeof(struct macb_dma_desc))
#define DESC_PER_CACHELINE_64	(ARCH_DMA_MINALIGN/DMA_DESC_SIZE)

/* Frame-length fields within the RX/TX descriptor control words */
#define RXBUF_FRMLEN_MASK	0x00000fff
#define TXBUF_FRMLEN_MASK	0x000007ff
103
/* Per-device driver state */
struct macb_device {
	void			*regs;		/* MMIO base of the controller */

	bool			is_big_endian;	/* CPU endianness, for DMACFG */

	const struct macb_config *config;	/* per-compatible settings */

	/* Ring bookkeeping (slot indices into rx_ring/tx_ring) */
	unsigned int		rx_tail;
	unsigned int		tx_head;
	unsigned int		tx_tail;
	unsigned int		next_rx_tail;
	bool			wrapped;	/* current RX frame wraps the ring end */

	void			*rx_buffer;
	void			*tx_buffer;
	struct macb_dma_desc	*rx_ring;
	struct macb_dma_desc	*tx_ring;
	size_t			rx_buffer_size;	/* bytes per RX buffer slot */

	/* Bus addresses of the buffers and rings above */
	unsigned long		rx_buffer_dma;
	unsigned long		rx_ring_dma;
	unsigned long		tx_ring_dma;

	/* Single dummy descriptor used to park unused GEM queues */
	struct macb_dma_desc	*dummy_desc;
	unsigned long		dummy_desc_dma;

	const struct device	*dev;
	/* Link parameters; used directly for fixed-link ports */
	unsigned int    	duplex;
	unsigned int    	speed;
	unsigned short		phy_addr;
	struct mii_dev		*bus;
#ifdef CONFIG_PHYLIB
	struct phy_device	*phydev;
#endif

#ifdef CONFIG_CLK
	unsigned long		pclk_rate;	/* peripheral clock rate in Hz */
#endif
	phy_interface_t		phy_interface;
};
144
/* USRIO register bit values for each PHY interface flavour */
struct macb_usrio_cfg {
	unsigned int		mii;
	unsigned int		rmii;
	unsigned int		rgmii;
	unsigned int		clken;	/* clock-enable bit, where the IP has one */
};

/* Static, per-compatible configuration */
struct macb_config {
	unsigned int		dma_burst_length;	/* FBLDO value; programmed only when non-zero */
	unsigned int		hw_dma_cap;		/* HW_DMA_CAP_32B or HW_DMA_CAP_64B */
	unsigned int		caps;

	/* Optional hook to reconfigure the TX clock for a given rate */
	int			(*clk_init)(struct udevice *dev, ulong rate);
	const struct macb_usrio_cfg	*usrio;
};
160
161static int macb_is_gem(struct macb_device *macb)
162{
163	return MACB_BFEXT(IDNUM, macb_readl(macb, MID)) >= 0x2;
164}
165
/* SoC-detection fallbacks for platforms that do not provide these helpers */
#ifndef cpu_is_sama5d2
#define cpu_is_sama5d2() 0
#endif

#ifndef cpu_is_sama5d4
#define cpu_is_sama5d4() 0
#endif
173
static int gem_is_gigabit_capable(struct macb_device *macb)
{
	/*
	 * The GEM controllers embedded in SAMA5D2 and SAMA5D4 are
	 * configured to support only 10/100.
	 */
	if (!macb_is_gem(macb))
		return 0;

	return !(cpu_is_sama5d2() || cpu_is_sama5d4());
}
182
183/* Is the port a fixed link */
184static int macb_port_is_fixed_link(struct macb_device *macb)
185{
186	return macb->phy_addr > PHY_MAX_ADDR;
187}
188
/*
 * Write one PHY register over MDIO.
 *
 * Enables the management port (MPE), pushes a Clause 22 write frame
 * through the MAN register, busy-waits on NSR.IDLE until the shift
 * completes (no timeout), then disables the management port again.
 */
static void macb_mdio_write(struct macb_device *macb, u8 phy_adr, u8 reg,
			    u16 value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	/* Enable the management port */
	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	/* Clause 22 frame: SOF=1, RW=1 (write), turnaround CODE=2 */
	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 1)
		 | MACB_BF(PHYA, phy_adr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2)
		 | MACB_BF(DATA, value));
	macb_writel(macb, MAN, frame);

	/* Wait for the shift operation to finish */
	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	/* Disable the management port */
	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);
}
216
/*
 * Read one PHY register over MDIO.
 *
 * Same sequence as macb_mdio_write() but with a Clause 22 read frame
 * (RW=2); the result is extracted from the DATA field of MAN after
 * NSR.IDLE signals completion.
 */
static u16 macb_mdio_read(struct macb_device *macb, u8 phy_adr, u8 reg)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	/* Enable the management port */
	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	/* Clause 22 frame: SOF=1, RW=2 (read), turnaround CODE=2 */
	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 2)
		 | MACB_BF(PHYA, phy_adr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2));
	macb_writel(macb, MAN, frame);

	/* Wait for the shift operation to finish */
	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	frame = macb_readl(macb, MAN);

	/* Disable the management port */
	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	return MACB_BFEXT(DATA, frame);
}
246
247void __weak arch_get_mdio_control(const char *name)
248{
249	return;
250}
251
252#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
253
254int macb_miiphy_read(struct mii_dev *bus, int phy_adr, int devad, int reg)
255{
256	u16 value = 0;
257	struct udevice *dev = eth_get_dev_by_name(bus->name);
258	struct macb_device *macb = dev_get_priv(dev);
259
260	arch_get_mdio_control(bus->name);
261	value = macb_mdio_read(macb, phy_adr, reg);
262
263	return value;
264}
265
266int macb_miiphy_write(struct mii_dev *bus, int phy_adr, int devad, int reg,
267		      u16 value)
268{
269	struct udevice *dev = eth_get_dev_by_name(bus->name);
270	struct macb_device *macb = dev_get_priv(dev);
271
272	arch_get_mdio_control(bus->name);
273	macb_mdio_write(macb, phy_adr, reg, value);
274
275	return 0;
276}
277#endif
278
279#define RX	1
280#define TX	0
281static inline void macb_invalidate_ring_desc(struct macb_device *macb, bool rx)
282{
283	if (rx)
284		invalidate_dcache_range(macb->rx_ring_dma,
285			ALIGN(macb->rx_ring_dma + MACB_RX_DMA_DESC_SIZE,
286			      PKTALIGN));
287	else
288		invalidate_dcache_range(macb->tx_ring_dma,
289			ALIGN(macb->tx_ring_dma + MACB_TX_DMA_DESC_SIZE,
290			      PKTALIGN));
291}
292
293static inline void macb_flush_ring_desc(struct macb_device *macb, bool rx)
294{
295	if (rx)
296		flush_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
297				   ALIGN(MACB_RX_DMA_DESC_SIZE, PKTALIGN));
298	else
299		flush_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
300				   ALIGN(MACB_TX_DMA_DESC_SIZE, PKTALIGN));
301}
302
303static inline void macb_flush_rx_buffer(struct macb_device *macb)
304{
305	flush_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
306			   ALIGN(macb->rx_buffer_size * MACB_RX_RING_SIZE,
307				 PKTALIGN));
308}
309
310static inline void macb_invalidate_rx_buffer(struct macb_device *macb)
311{
312	invalidate_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
313				ALIGN(macb->rx_buffer_size * MACB_RX_RING_SIZE,
314				      PKTALIGN));
315}
316
317#if defined(CONFIG_CMD_NET)
318
319static struct macb_dma_desc_64 *macb_64b_desc(struct macb_dma_desc *desc)
320{
321	return (struct macb_dma_desc_64 *)((void *)desc
322		+ sizeof(struct macb_dma_desc));
323}
324
325static void macb_set_addr(struct macb_device *macb, struct macb_dma_desc *desc,
326			  ulong addr)
327{
328	struct macb_dma_desc_64 *desc_64;
329
330	if (macb->config->hw_dma_cap & HW_DMA_CAP_64B) {
331		desc_64 = macb_64b_desc(desc);
332		desc_64->addrh = upper_32_bits(addr);
333	}
334	desc->addr = lower_32_bits(addr);
335}
336
/*
 * Transmit one frame: place it on the next TX descriptor, kick the
 * controller, then busy-wait until the hardware returns the descriptor
 * (TX_USED set) or MACB_TX_TIMEOUT microseconds elapse.
 */
static int _macb_send(struct macb_device *macb, const char *name, void *packet,
		      int length)
{
	unsigned long paddr, ctrl;
	unsigned int tx_head = macb->tx_head;
	int i;

	/* Make the frame visible to the DMA engine */
	paddr = dma_map_single(packet, length, DMA_TO_DEVICE);

	ctrl = length & TXBUF_FRMLEN_MASK;
	ctrl |= MACB_BIT(TX_LAST);	/* single-descriptor frame */
	if (tx_head == (MACB_TX_RING_SIZE - 1)) {
		ctrl |= MACB_BIT(TX_WRAP);
		macb->tx_head = 0;
	} else {
		macb->tx_head++;
	}

	/* 64-bit descriptors occupy two ring slots each */
	if (macb->config->hw_dma_cap & HW_DMA_CAP_64B)
		tx_head = tx_head * 2;

	macb->tx_ring[tx_head].ctrl = ctrl;
	macb_set_addr(macb, &macb->tx_ring[tx_head], paddr);

	barrier();
	macb_flush_ring_desc(macb, TX);
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));

	/*
	 * Wait for completion: the networking core may re-use the transmit
	 * buffer as soon as we return, so the DMA must be finished by then.
	 */
	for (i = 0; i <= MACB_TX_TIMEOUT; i++) {
		barrier();
		macb_invalidate_ring_desc(macb, TX);
		ctrl = macb->tx_ring[tx_head].ctrl;
		if (ctrl & MACB_BIT(TX_USED))
			break;
		udelay(1);
	}

	dma_unmap_single(paddr, length, DMA_TO_DEVICE);

	if (i <= MACB_TX_TIMEOUT) {
		/* Completed: report any error bits the controller latched */
		if (ctrl & MACB_BIT(TX_UNDERRUN))
			printf("%s: TX underrun\n", name);
		if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
			printf("%s: TX buffers exhausted in mid frame\n", name);
	} else {
		printf("%s: TX timeout\n", name);
	}

	/* No one cares anyway */
	return 0;
}
392
/*
 * Mark one RX descriptor as free (clear RX_USED), deferring the actual
 * write until a whole cacheline worth of descriptors can be released.
 */
static void reclaim_rx_buffer(struct macb_device *macb,
			      unsigned int idx)
{
	unsigned int mask;
	unsigned int shift;
	unsigned int i;

	/*
	 * There may be multiple descriptors per CPU cacheline,
	 * so a cache flush would flush the whole line, meaning the content of other descriptors
	 * in the cacheline would also flush. If one of the other descriptors had been
	 * written to by the controller, the flush would cause those changes to be lost.
	 *
	 * To circumvent this issue, we do the actual freeing only when we need to free
	 * the last descriptor in the current cacheline. When the current descriptor is the
	 * last in the cacheline, we free all the descriptors that belong to that cacheline.
	 */
	if (macb->config->hw_dma_cap & HW_DMA_CAP_64B) {
		mask = DESC_PER_CACHELINE_64 - 1;
		shift = 1;	/* 64-bit descriptors take two ring slots */
	} else {
		mask = DESC_PER_CACHELINE_32 - 1;
		shift = 0;
	}

	/* we exit without freeing if idx is not the last descriptor in the cacheline */
	if ((idx & mask) != mask)
		return;

	for (i = idx & (~mask); i <= idx; i++)
		macb->rx_ring[i << shift].addr &= ~MACB_BIT(RX_USED);
}
425
/*
 * Return every RX descriptor in [rx_tail, new_tail) to the controller by
 * clearing its RX_USED bit (honouring the cacheline grouping performed by
 * reclaim_rx_buffer()), then advance rx_tail.
 */
static void reclaim_rx_buffers(struct macb_device *macb,
			       unsigned int new_tail)
{
	unsigned int i;

	i = macb->rx_tail;

	macb_invalidate_ring_desc(macb, RX);
	/* First walk the wrap-around stretch up to the end of the ring... */
	while (i > new_tail) {
		reclaim_rx_buffer(macb, i);
		i++;
		if (i >= MACB_RX_RING_SIZE)
			i = 0;
	}

	/* ...then the linear stretch up to new_tail */
	while (i < new_tail) {
		reclaim_rx_buffer(macb, i);
		i++;
	}

	barrier();
	macb_flush_ring_desc(macb, RX);
	macb->rx_tail = new_tail;
}
450
/*
 * Receive one frame from the RX descriptor ring.
 *
 * Scans from next_rx_tail: a frame starts at a descriptor with RX_SOF and
 * ends at one with RX_EOF. A frame that wraps past the end of the ring is
 * linearized into net_rx_packets[0] before being handed to the caller.
 *
 * NOTE(review): on HW_DMA_CAP_64B hardware the index is temporarily
 * doubled to address 16-byte descriptor slots; 'flag' records whether it
 * has already been halved again on the RX_SOF path in this iteration.
 *
 * Returns the frame length, or -EAGAIN when no complete frame is ready.
 */
static int _macb_recv(struct macb_device *macb, uchar **packetp)
{
	unsigned int next_rx_tail = macb->next_rx_tail;
	void *buffer;
	int length;
	u32 status;
	u8 flag = false;

	macb->wrapped = false;
	for (;;) {
		macb_invalidate_ring_desc(macb, RX);

		if (macb->config->hw_dma_cap & HW_DMA_CAP_64B)
			next_rx_tail = next_rx_tail * 2;

		/* Descriptor still owned by hardware: nothing received yet */
		if (!(macb->rx_ring[next_rx_tail].addr & MACB_BIT(RX_USED)))
			return -EAGAIN;

		status = macb->rx_ring[next_rx_tail].ctrl;
		if (status & MACB_BIT(RX_SOF)) {
			if (macb->config->hw_dma_cap & HW_DMA_CAP_64B) {
				next_rx_tail = next_rx_tail / 2;
				flag = true;
			}

			/* New frame begins: drop any stale partial frame */
			if (next_rx_tail != macb->rx_tail)
				reclaim_rx_buffers(macb, next_rx_tail);
			macb->wrapped = false;
		}

		if (status & MACB_BIT(RX_EOF)) {
			buffer = macb->rx_buffer +
				macb->rx_buffer_size * macb->rx_tail;
			length = status & RXBUF_FRMLEN_MASK;

			macb_invalidate_rx_buffer(macb);
			if (macb->wrapped) {
				/* Frame crosses the ring end: copy it flat */
				unsigned int headlen, taillen;

				headlen = macb->rx_buffer_size *
					(MACB_RX_RING_SIZE - macb->rx_tail);
				taillen = length - headlen;
				memcpy((void *)net_rx_packets[0],
				       buffer, headlen);
				memcpy((void *)net_rx_packets[0] + headlen,
				       macb->rx_buffer, taillen);
				*packetp = (void *)net_rx_packets[0];
			} else {
				*packetp = buffer;
			}

			if (macb->config->hw_dma_cap & HW_DMA_CAP_64B) {
				if (!flag)
					next_rx_tail = next_rx_tail / 2;
			}

			if (++next_rx_tail >= MACB_RX_RING_SIZE)
				next_rx_tail = 0;
			macb->next_rx_tail = next_rx_tail;
			return length;
		} else {
			if (macb->config->hw_dma_cap & HW_DMA_CAP_64B) {
				if (!flag)
					next_rx_tail = next_rx_tail / 2;
				flag = false;
			}

			if (++next_rx_tail >= MACB_RX_RING_SIZE) {
				macb->wrapped = true;
				next_rx_tail = 0;
			}
		}
		barrier();
	}
}
526
527static void macb_phy_reset(struct macb_device *macb, const char *name)
528{
529	int i;
530	u16 status, adv;
531
532	adv = ADVERTISE_CSMA | ADVERTISE_ALL;
533	macb_mdio_write(macb, macb->phy_addr, MII_ADVERTISE, adv);
534	printf("%s: Starting autonegotiation...\n", name);
535	macb_mdio_write(macb, macb->phy_addr, MII_BMCR, (BMCR_ANENABLE
536					 | BMCR_ANRESTART));
537
538	for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
539		status = macb_mdio_read(macb, macb->phy_addr, MII_BMSR);
540		if (status & BMSR_ANEGCOMPLETE)
541			break;
542		udelay(100);
543	}
544
545	if (status & BMSR_ANEGCOMPLETE)
546		printf("%s: Autonegotiation complete\n", name);
547	else
548		printf("%s: Autonegotiation timed out (status=0x%04x)\n",
549		       name, status);
550}
551
552static int macb_phy_find(struct macb_device *macb, const char *name)
553{
554	int i;
555	u16 phy_id;
556
557	phy_id = macb_mdio_read(macb, macb->phy_addr, MII_PHYSID1);
558	if (phy_id != 0xffff) {
559		printf("%s: PHY present at %d\n", name, macb->phy_addr);
560		return 0;
561	}
562
563	/* Search for PHY... */
564	for (i = 0; i < 32; i++) {
565		macb->phy_addr = i;
566		phy_id = macb_mdio_read(macb, macb->phy_addr, MII_PHYSID1);
567		if (phy_id != 0xffff) {
568			printf("%s: PHY present at %d\n", name, i);
569			return 0;
570		}
571	}
572
573	/* PHY isn't up to snuff */
574	printf("%s: PHY not found\n", name);
575
576	return -ENODEV;
577}
578
/**
 * macb_sifive_clk_init() - Select the GEMGXL TX clock source on SiFive SoCs
 * @dev:	MACB udevice
 * @rate:	Target TX clock rate in Hz; 125000000 selects GMII mode
 *
 * NOTE(review): the kernel-doc previously here described macb_linkspd_cb()
 * and was misattached to this function; rewritten for the code it precedes.
 *
 * Returns 0 on success or -ENODEV if the GEMGXL management register block
 * is missing from the device tree.
 */
static int macb_sifive_clk_init(struct udevice *dev, ulong rate)
{
	void *gemgxl_regs;

	gemgxl_regs = dev_read_addr_index_ptr(dev, 1);
	if (!gemgxl_regs)
		return -ENODEV;

	/*
	 * SiFive GEMGXL TX clock operation mode:
	 *
	 * 0 = GMII mode. Use 125 MHz gemgxlclk from PRCI in TX logic
	 *     and output clock on GMII output signal GTX_CLK
	 * 1 = MII mode. Use MII input signal TX_CLK in TX logic
	 */
	writel(rate != 125000000, gemgxl_regs);
	return 0;
}
605
606static int macb_sama7g5_clk_init(struct udevice *dev, ulong rate)
607{
608	struct clk clk;
609	int ret;
610
611	ret = clk_get_by_name(dev, "tx_clk", &clk);
612	if (ret)
613		return ret;
614
615	/*
616	 * This is for using GCK. Clock rate is addressed via assigned-clock
617	 * property, so only clock enable is needed here. The switching to
618	 * proper clock rate depending on link speed is managed by IP logic.
619	 */
620	return clk_enable(&clk);
621}
622
/**
 * macb_linkspd_cb() - Adjust the MACB TX clock after a link-speed change
 * @dev:	MACB udevice
 * @speed:	New link speed (_10BASET, _100BASET or _1000BASET)
 *
 * Uses the per-compatible clk_init hook when one exists, otherwise sets
 * the optional "tx_clk" from the device tree. A no-op without CONFIG_CLK.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int __weak macb_linkspd_cb(struct udevice *dev, unsigned int speed)
{
#ifdef CONFIG_CLK
	struct macb_device *macb = dev_get_priv(dev);
	struct clk tx_clk;
	ulong rate;
	int ret;

	switch (speed) {
	case _10BASET:
		rate = 2500000;		/* 2.5 MHz */
		break;
	case _100BASET:
		rate = 25000000;	/* 25 MHz */
		break;
	case _1000BASET:
		rate = 125000000;	/* 125 MHz */
		break;
	default:
		/* does not change anything */
		return 0;
	}

	/* Platform-specific TX clock handling takes precedence */
	if (macb->config->clk_init)
		return macb->config->clk_init(dev, rate);

	/*
	 * "tx_clk" is an optional clock source for MACB.
	 * Ignore if it does not exist in DT.
	 */
	ret = clk_get_by_name(dev, "tx_clk", &tx_clk);
	if (ret)
		return 0;

	if (tx_clk.dev) {
		ret = clk_set_rate(&tx_clk, rate);
		if (ret < 0)
			return ret;
	}
#endif

	return 0;
}
666
/**
 * macb_phy_init() - Probe the PHY and program MAC speed/duplex to match
 * @dev:	MACB udevice
 * @name:	Device name used in console messages
 *
 * For a normal port: find the PHY, (re)negotiate the link if it is down,
 * then program NCFGR and the TX clock for the negotiated speed/duplex.
 * For a fixed-link port: use the speed/duplex cached in the driver state.
 *
 * Return: 0 on success or a negative errno (-ENODEV, -ENETDOWN, ...).
 */
static int macb_phy_init(struct udevice *dev, const char *name)
{
	struct macb_device *macb = dev_get_priv(dev);
	u32 ncfgr;
	u16 phy_id, status, adv, lpa;
	int media, speed, duplex;
	int ret;
	int i;

	arch_get_mdio_control(name);
	/* If port is not fixed -> setup PHY */
	if (!macb_port_is_fixed_link(macb)) {
		/* Auto-detect phy_addr */
		ret = macb_phy_find(macb, name);
		if (ret)
			return ret;

		/* Check if the PHY is up to snuff... */
		phy_id = macb_mdio_read(macb, macb->phy_addr, MII_PHYSID1);
		if (phy_id == 0xffff) {
			printf("%s: No PHY present\n", name);
			return -ENODEV;
		}

#ifdef CONFIG_PHYLIB
		macb->phydev = phy_connect(macb->bus, macb->phy_addr, dev,
				     macb->phy_interface);
		if (!macb->phydev) {
			printf("phy_connect failed\n");
			return -ENODEV;
		}

		phy_config(macb->phydev);
#endif

		status = macb_mdio_read(macb, macb->phy_addr, MII_BMSR);
		if (!(status & BMSR_LSTATUS)) {
			/* Try to re-negotiate if we don't have link already. */
			macb_phy_reset(macb, name);

			for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
				status = macb_mdio_read(macb, macb->phy_addr, MII_BMSR);
				if (status & BMSR_LSTATUS) {
					/*
					 * Delay a bit after the link is established,
					 * so that the next xfer does not fail
					 */
					mdelay(10);
					break;
				}
				udelay(100);
			}
		}

		if (!(status & BMSR_LSTATUS)) {
			printf("%s: link down (status: 0x%04x)\n",
			       name, status);
			return -ENETDOWN;
		}

		/* First check for GMAC and that it is GiB capable */
		if (gem_is_gigabit_capable(macb)) {
			lpa = macb_mdio_read(macb, macb->phy_addr, MII_STAT1000);

			if (lpa & (LPA_1000FULL | LPA_1000HALF | LPA_1000XFULL |
						LPA_1000XHALF)) {
				duplex = ((lpa & (LPA_1000FULL | LPA_1000XFULL)) ?
						1 : 0);

				printf("%s: link up, 1000Mbps %s-duplex (lpa: 0x%04x)\n",
					name,
					duplex ? "full" : "half",
					lpa);

				/* Enter gigabit mode with the right duplex */
				ncfgr = macb_readl(macb, NCFGR);
				ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
				ncfgr |= GEM_BIT(GBE);

				if (duplex)
					ncfgr |= MACB_BIT(FD);

				macb_writel(macb, NCFGR, ncfgr);

				ret = macb_linkspd_cb(dev, _1000BASET);
				if (ret)
					return ret;

				return 0;
			}
		}

		/* fall back for EMAC checking */
		adv = macb_mdio_read(macb, macb->phy_addr, MII_ADVERTISE);
		lpa = macb_mdio_read(macb, macb->phy_addr, MII_LPA);
		media = mii_nway_result(lpa & adv);
		speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
			? 1 : 0);
		duplex = (media & ADVERTISE_FULL) ? 1 : 0;
		printf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
			name,
			speed ? "100" : "10",
			duplex ? "full" : "half",
			lpa);
	} else {
		/* if macb port is a fixed link */
		/* TODO : manage gigabit capable processors */
		speed = macb->speed;
		duplex = macb->duplex;
		printf("%s: link up, %sMbps %s-duplex\n",
			name,
			speed ? "100" : "10",
			duplex ? "full" : "half");
	}

	/* Program 10/100 speed and duplex, clearing any gigabit setting */
	ncfgr = macb_readl(macb, NCFGR);
	ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | GEM_BIT(GBE));
	if (speed) {
		ncfgr |= MACB_BIT(SPD);
		ret = macb_linkspd_cb(dev, _100BASET);
	} else {
		ret = macb_linkspd_cb(dev, _10BASET);
	}

	if (ret)
		return ret;

	if (duplex)
		ncfgr |= MACB_BIT(FD);
	macb_writel(macb, NCFGR, ncfgr);

	return 0;
}
799
/*
 * Park every TX/RX queue except queue 0 on a single dummy descriptor with
 * TX_USED set, so the extra queues the GEM reports in DCFG6 never fetch
 * real buffers. Always returns 0.
 */
static int gmac_init_multi_queues(struct macb_device *macb)
{
	int i, num_queues = 1;
	u32 queue_mask;
	unsigned long paddr;

	/* bit 0 is never set but queue 0 always exists */
	queue_mask = gem_readl(macb, DCFG6) & 0xff;
	queue_mask |= 0x1;

	for (i = 1; i < MACB_MAX_QUEUES; i++)
		if (queue_mask & (1 << i))
			num_queues++;

	/* One permanently-used dummy descriptor, shared by all extra queues */
	macb->dummy_desc->ctrl = MACB_BIT(TX_USED);
	macb->dummy_desc->addr = 0;
	flush_dcache_range(macb->dummy_desc_dma, macb->dummy_desc_dma +
			ALIGN(MACB_TX_DUMMY_DMA_DESC_SIZE, PKTALIGN));
	paddr = macb->dummy_desc_dma;

	for (i = 1; i < num_queues; i++) {
		gem_writel_queue_TBQP(macb, lower_32_bits(paddr), i - 1);
		gem_writel_queue_RBQP(macb, lower_32_bits(paddr), i - 1);
		if (macb->config->hw_dma_cap & HW_DMA_CAP_64B) {
			gem_writel_queue_TBQPH(macb, upper_32_bits(paddr),
					       i - 1);
			gem_writel_queue_RBQPH(macb, upper_32_bits(paddr),
					       i - 1);
		}
	}
	return 0;
}
832
/*
 * Program the GEM DMA configuration register: RX buffer size, AHB burst
 * length, packet-buffer TX mode, descriptor endianness and 64-bit
 * addressing.
 */
static void gmac_configure_dma(struct macb_device *macb)
{
	u32 buffer_size;
	u32 dmacfg;

	/* RXBS is expressed in units of RX_BUFFER_MULTIPLE (64 bytes) */
	buffer_size = macb->rx_buffer_size / RX_BUFFER_MULTIPLE;
	dmacfg = gem_readl(macb, DMACFG) & ~GEM_BF(RXBS, -1L);
	dmacfg |= GEM_BF(RXBS, buffer_size);

	/* Burst length of 0 keeps whatever the register already holds */
	if (macb->config->dma_burst_length)
		dmacfg = GEM_BFINS(FBLDO,
				   macb->config->dma_burst_length, dmacfg);

	dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
	dmacfg &= ~GEM_BIT(ENDIA_PKT);

	if (macb->is_big_endian)
		dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
	else
		dmacfg &= ~GEM_BIT(ENDIA_DESC);

	dmacfg &= ~GEM_BIT(ADDR64);
	if (macb->config->hw_dma_cap & HW_DMA_CAP_64B)
		dmacfg |= GEM_BIT(ADDR64);

	gem_writel(macb, DMACFG, dmacfg);
}
860
/*
 * Full controller bring-up for network operation: rebuild both descriptor
 * rings, program ring base and interface-mode registers, initialize the
 * PHY and finally enable the transmitter and receiver.
 */
static int _macb_init(struct udevice *dev, const char *name)
{
	struct macb_device *macb = dev_get_priv(dev);
	unsigned int val = 0;
	unsigned long paddr;
	int ret;
	int i;
	int count;

	/*
	 * macb_halt should have been called at some point before now,
	 * so we'll assume the controller is idle.
	 */

	/* initialize DMA descriptors */
	paddr = macb->rx_buffer_dma;
	for (i = 0; i < MACB_RX_RING_SIZE; i++) {
		if (i == (MACB_RX_RING_SIZE - 1))
			paddr |= MACB_BIT(RX_WRAP);
		/* 64-bit descriptors occupy two ring slots each */
		if (macb->config->hw_dma_cap & HW_DMA_CAP_64B)
			count = i * 2;
		else
			count = i;
		macb->rx_ring[count].ctrl = 0;
		macb_set_addr(macb, &macb->rx_ring[count], paddr);
		paddr += macb->rx_buffer_size;
	}
	macb_flush_ring_desc(macb, RX);
	macb_flush_rx_buffer(macb);

	/* All TX descriptors start out owned by software (TX_USED set) */
	for (i = 0; i < MACB_TX_RING_SIZE; i++) {
		if (macb->config->hw_dma_cap & HW_DMA_CAP_64B)
			count = i * 2;
		else
			count = i;
		macb_set_addr(macb, &macb->tx_ring[count], 0);
		if (i == (MACB_TX_RING_SIZE - 1))
			macb->tx_ring[count].ctrl = MACB_BIT(TX_USED) |
				MACB_BIT(TX_WRAP);
		else
			macb->tx_ring[count].ctrl = MACB_BIT(TX_USED);
	}
	macb_flush_ring_desc(macb, TX);

	macb->rx_tail = 0;
	macb->tx_head = 0;
	macb->tx_tail = 0;
	macb->next_rx_tail = 0;

#ifdef CONFIG_MACB_ZYNQ
	gem_writel(macb, DMACFG, MACB_ZYNQ_GEM_DMACR_INIT);
#endif

	/* Program ring base addresses (upper halves only on 64-bit DMA) */
	macb_writel(macb, RBQP, lower_32_bits(macb->rx_ring_dma));
	macb_writel(macb, TBQP, lower_32_bits(macb->tx_ring_dma));
	if (macb->config->hw_dma_cap & HW_DMA_CAP_64B) {
		macb_writel(macb, RBQPH, upper_32_bits(macb->rx_ring_dma));
		macb_writel(macb, TBQPH, upper_32_bits(macb->tx_ring_dma));
	}

	if (macb_is_gem(macb)) {
		/* Initialize DMA properties */
		gmac_configure_dma(macb);
		/* Check the multi queue and initialize the queue for tx */
		gmac_init_multi_queues(macb);

		/*
		 * When the GMAC IP with GE feature, this bit is used to
		 * select interface between RGMII and GMII.
		 * When the GMAC IP without GE feature, this bit is used
		 * to select interface between RMII and MII.
		 */
		if (macb->phy_interface == PHY_INTERFACE_MODE_RGMII ||
		    macb->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    macb->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
		    macb->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
			val = macb->config->usrio->rgmii;
		else if (macb->phy_interface == PHY_INTERFACE_MODE_RMII)
			val = macb->config->usrio->rmii;
		else if (macb->phy_interface == PHY_INTERFACE_MODE_MII)
			val = macb->config->usrio->mii;

		if (macb->config->caps & MACB_CAPS_USRIO_HAS_CLKEN)
			val |= macb->config->usrio->clken;

		gem_writel(macb, USRIO, val);

		if (macb->phy_interface == PHY_INTERFACE_MODE_SGMII) {
			unsigned int ncfgr = macb_readl(macb, NCFGR);

			ncfgr |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
			macb_writel(macb, NCFGR, ncfgr);
		}
	} else {
	/* choose RMII or MII mode. This depends on the board */
#ifdef CONFIG_AT91FAMILY
		if (macb->phy_interface == PHY_INTERFACE_MODE_RMII) {
			macb_writel(macb, USRIO,
				    macb->config->usrio->rmii |
				    macb->config->usrio->clken);
		} else {
			macb_writel(macb, USRIO, macb->config->usrio->clken);
		}
#else
		if (macb->phy_interface == PHY_INTERFACE_MODE_RMII)
			macb_writel(macb, USRIO, 0);
		else
			macb_writel(macb, USRIO, macb->config->usrio->mii);
#endif
	}

	ret = macb_phy_init(dev, name);
	if (ret)
		return ret;

	/* Enable TX and RX */
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE));

	return 0;
}
981
982static void _macb_halt(struct macb_device *macb)
983{
984	u32 ncr, tsr;
985
986	/* Halt the controller and wait for any ongoing transmission to end. */
987	ncr = macb_readl(macb, NCR);
988	ncr |= MACB_BIT(THALT);
989	macb_writel(macb, NCR, ncr);
990
991	do {
992		tsr = macb_readl(macb, TSR);
993	} while (tsr & MACB_BIT(TGO));
994
995	/* Disable TX and RX, and clear statistics */
996	macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
997}
998
999static int _macb_write_hwaddr(struct macb_device *macb, unsigned char *enetaddr)
1000{
1001	u32 hwaddr_bottom;
1002	u16 hwaddr_top;
1003
1004	/* set hardware address */
1005	hwaddr_bottom = enetaddr[0] | enetaddr[1] << 8 |
1006			enetaddr[2] << 16 | enetaddr[3] << 24;
1007	macb_writel(macb, SA1B, hwaddr_bottom);
1008	hwaddr_top = enetaddr[4] | enetaddr[5] << 8;
1009	macb_writel(macb, SA1T, hwaddr_top);
1010	return 0;
1011}
1012
1013static u32 macb_mdc_clk_div(int id, struct macb_device *macb)
1014{
1015	u32 config;
1016#if defined(CONFIG_CLK)
1017	unsigned long macb_hz = macb->pclk_rate;
1018#else
1019	unsigned long macb_hz = get_macb_pclk_rate(id);
1020#endif
1021
1022	if (macb_hz < 20000000)
1023		config = MACB_BF(CLK, MACB_CLK_DIV8);
1024	else if (macb_hz < 40000000)
1025		config = MACB_BF(CLK, MACB_CLK_DIV16);
1026	else if (macb_hz < 80000000)
1027		config = MACB_BF(CLK, MACB_CLK_DIV32);
1028	else
1029		config = MACB_BF(CLK, MACB_CLK_DIV64);
1030
1031	return config;
1032}
1033
1034static u32 gem_mdc_clk_div(int id, struct macb_device *macb)
1035{
1036	u32 config;
1037
1038#if defined(CONFIG_CLK)
1039	unsigned long macb_hz = macb->pclk_rate;
1040#else
1041	unsigned long macb_hz = get_macb_pclk_rate(id);
1042#endif
1043
1044	if (macb_hz < 20000000)
1045		config = GEM_BF(CLK, GEM_CLK_DIV8);
1046	else if (macb_hz < 40000000)
1047		config = GEM_BF(CLK, GEM_CLK_DIV16);
1048	else if (macb_hz < 80000000)
1049		config = GEM_BF(CLK, GEM_CLK_DIV32);
1050	else if (macb_hz < 120000000)
1051		config = GEM_BF(CLK, GEM_CLK_DIV48);
1052	else if (macb_hz < 160000000)
1053		config = GEM_BF(CLK, GEM_CLK_DIV64);
1054	else if (macb_hz < 240000000)
1055		config = GEM_BF(CLK, GEM_CLK_DIV96);
1056	else if (macb_hz < 320000000)
1057		config = GEM_BF(CLK, GEM_CLK_DIV128);
1058	else
1059		config = GEM_BF(CLK, GEM_CLK_DIV224);
1060
1061	return config;
1062}
1063
1064/*
1065 * Get the DMA bus width field of the network configuration register that we
1066 * should program. We find the width from decoding the design configuration
1067 * register to find the maximum supported data bus width.
1068 */
1069static u32 macb_dbw(struct macb_device *macb)
1070{
1071	switch (GEM_BFEXT(DBWDEF, gem_readl(macb, DCFG1))) {
1072	case 4:
1073		return GEM_BF(DBW, GEM_DBW128);
1074	case 2:
1075		return GEM_BF(DBW, GEM_DBW64);
1076	case 1:
1077	default:
1078		return GEM_BF(DBW, GEM_DBW32);
1079	}
1080}
1081
/*
 * One-time setup: allocate DMA-coherent rings and RX buffers and program
 * a minimal NCFGR (MDC clock divider and, for GEM, bus width) so that
 * the PHY can be talked to.
 */
static void _macb_eth_initialize(struct macb_device *macb)
{
	int id = 0;	/* This is not used by functions we call */
	u32 ncfgr;

	/* GEM uses larger per-slot RX buffers than the 10/100 MACB */
	if (macb_is_gem(macb))
		macb->rx_buffer_size = GEM_RX_BUFFER_SIZE;
	else
		macb->rx_buffer_size = MACB_RX_BUFFER_SIZE;

	/* TODO: we need check the rx/tx_ring_dma is dcache line aligned */
	macb->rx_buffer = dma_alloc_coherent(macb->rx_buffer_size *
					     MACB_RX_RING_SIZE,
					     &macb->rx_buffer_dma);
	macb->rx_ring = dma_alloc_coherent(MACB_RX_DMA_DESC_SIZE,
					   &macb->rx_ring_dma);
	macb->tx_ring = dma_alloc_coherent(MACB_TX_DMA_DESC_SIZE,
					   &macb->tx_ring_dma);
	macb->dummy_desc = dma_alloc_coherent(MACB_TX_DUMMY_DMA_DESC_SIZE,
					   &macb->dummy_desc_dma);

	/*
	 * Do some basic initialization so that we at least can talk
	 * to the PHY
	 */
	if (macb_is_gem(macb)) {
		ncfgr = gem_mdc_clk_div(id, macb);
		ncfgr |= macb_dbw(macb);
	} else {
		ncfgr = macb_mdc_clk_div(id, macb);
	}

	macb_writel(macb, NCFGR, ncfgr);
}
1116
1117static int macb_start(struct udevice *dev)
1118{
1119	return _macb_init(dev, dev->name);
1120}
1121
1122static int macb_send(struct udevice *dev, void *packet, int length)
1123{
1124	struct macb_device *macb = dev_get_priv(dev);
1125
1126	return _macb_send(macb, dev->name, packet, length);
1127}
1128
1129static int macb_recv(struct udevice *dev, int flags, uchar **packetp)
1130{
1131	struct macb_device *macb = dev_get_priv(dev);
1132
1133	macb->next_rx_tail = macb->rx_tail;
1134	macb->wrapped = false;
1135
1136	return _macb_recv(macb, packetp);
1137}
1138
1139static int macb_free_pkt(struct udevice *dev, uchar *packet, int length)
1140{
1141	struct macb_device *macb = dev_get_priv(dev);
1142
1143	reclaim_rx_buffers(macb, macb->next_rx_tail);
1144
1145	return 0;
1146}
1147
/* DM .stop op: halt the controller */
static void macb_stop(struct udevice *dev)
{
	_macb_halt(dev_get_priv(dev));
}
1154
1155static int macb_write_hwaddr(struct udevice *dev)
1156{
1157	struct eth_pdata *plat = dev_get_plat(dev);
1158	struct macb_device *macb = dev_get_priv(dev);
1159
1160	return _macb_write_hwaddr(macb, plat->enetaddr);
1161}
1162
/* Driver-model ethernet operations: thin adapters over the _macb_* core */
static const struct eth_ops macb_eth_ops = {
	.start	= macb_start,
	.send	= macb_send,
	.recv	= macb_recv,
	.stop	= macb_stop,
	.free_pkt	= macb_free_pkt,
	.write_hwaddr	= macb_write_hwaddr,
};
1171
#ifdef CONFIG_CLK
/**
 * macb_enable_clk() - enable the peripheral clock and cache its rate
 * @dev: MACB udevice
 *
 * Looks up clock 0 of the device, enables it (tolerating clock drivers
 * that do not implement enable), and stores the rate in macb->pclk_rate
 * for later MDC divider calculation.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int macb_enable_clk(struct udevice *dev)
{
	struct macb_device *macb = dev_get_priv(dev);
	struct clk clk;
	ulong clk_rate;
	int ret;

	ret = clk_get_by_index(dev, 0, &clk);
	if (ret)
		/* Propagate the real error instead of folding it to -EINVAL */
		return ret;

	/*
	 * If clock driver didn't support enable or disable then
	 * we get -ENOSYS from clk_enable(). To handle this, we
	 * don't fail for ret == -ENOSYS.
	 */
	ret = clk_enable(&clk);
	if (ret && ret != -ENOSYS)
		return ret;

	clk_rate = clk_get_rate(&clk);
	if (!clk_rate)
		return -EINVAL;

	macb->pclk_rate = clk_rate;

	return 0;
}
#endif
1202
/* USRIO register bit layout used by most MACB/GEM integrations */
static const struct macb_usrio_cfg macb_default_usrio = {
	.mii = MACB_BIT(MII),
	.rmii = MACB_BIT(RMII),
	.rgmii = GEM_BIT(RGMII),
	.clken = MACB_BIT(CLKEN),
};
1209
/*
 * Fallback configuration for compatibles that carry no .data.
 * Deliberately non-const: probe upgrades hw_dma_cap to 64-bit at runtime
 * when the hardware advertises 64-bit DMA addressing (DCFG6.DAW64).
 */
static struct macb_config default_gem_config = {
	.dma_burst_length = 16,
	.hw_dma_cap = HW_DMA_CAP_32B,
	.clk_init = NULL,
	.usrio = &macb_default_usrio,
};
1216
1217static int macb_eth_probe(struct udevice *dev)
1218{
1219	struct eth_pdata *pdata = dev_get_plat(dev);
1220	struct macb_device *macb = dev_get_priv(dev);
1221	struct ofnode_phandle_args phandle_args;
1222	int ret;
1223
1224	macb->phy_interface = dev_read_phy_mode(dev);
1225	if (macb->phy_interface == PHY_INTERFACE_MODE_NA)
1226		return -EINVAL;
1227
1228	/* Read phyaddr from DT */
1229	if (!dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
1230					&phandle_args))
1231		macb->phy_addr = ofnode_read_u32_default(phandle_args.node,
1232							 "reg", -1);
1233
1234	macb->regs = (void *)(uintptr_t)pdata->iobase;
1235
1236	macb->is_big_endian = (cpu_to_be32(0x12345678) == 0x12345678);
1237
1238	macb->config = (struct macb_config *)dev_get_driver_data(dev);
1239	if (!macb->config) {
1240		if (IS_ENABLED(CONFIG_DMA_ADDR_T_64BIT)) {
1241			if (GEM_BFEXT(DAW64, gem_readl(macb, DCFG6)))
1242				default_gem_config.hw_dma_cap = HW_DMA_CAP_64B;
1243		}
1244		macb->config = &default_gem_config;
1245	}
1246
1247#ifdef CONFIG_CLK
1248	ret = macb_enable_clk(dev);
1249	if (ret)
1250		return ret;
1251#endif
1252
1253	_macb_eth_initialize(macb);
1254
1255#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
1256	macb->bus = mdio_alloc();
1257	if (!macb->bus)
1258		return -ENOMEM;
1259	strlcpy(macb->bus->name, dev->name, MDIO_NAME_LEN);
1260	macb->bus->read = macb_miiphy_read;
1261	macb->bus->write = macb_miiphy_write;
1262
1263	ret = mdio_register(macb->bus);
1264	if (ret < 0)
1265		return ret;
1266	macb->bus = miiphy_get_dev_by_name(dev->name);
1267#endif
1268
1269	return 0;
1270}
1271
/*
 * macb_eth_remove - undo probe: release the PHY device and tear down
 * the MDIO bus.  Unregister must precede mdio_free().
 */
static int macb_eth_remove(struct udevice *dev)
{
	struct macb_device *macb = dev_get_priv(dev);

#ifdef CONFIG_PHYLIB
	/* free(NULL) is a no-op, so this is safe even if no PHY was found */
	free(macb->phydev);
#endif
	/* NOTE(review): bus may be NULL when CMD_MII/PHYLIB are disabled —
	 * assumes mdio_unregister/mdio_free tolerate NULL; verify */
	mdio_unregister(macb->bus);
	mdio_free(macb->bus);

	return 0;
}
1284
1285/**
1286 * macb_late_eth_of_to_plat
1287 * @dev:	udevice struct
1288 * Returns 0 when operation success and negative errno number
1289 * when operation failed.
1290 */
1291int __weak macb_late_eth_of_to_plat(struct udevice *dev)
1292{
1293	return 0;
1294}
1295
1296static int macb_eth_of_to_plat(struct udevice *dev)
1297{
1298	struct eth_pdata *pdata = dev_get_plat(dev);
1299	struct macb_device *macb = dev_get_priv(dev);
1300	void *blob = (void *)gd->fdt_blob;
1301	int node = dev_of_offset(dev);
1302	int fl_node, speed_fdt;
1303
1304	/* fetch 'fixed-link' property */
1305	fl_node = fdt_subnode_offset(blob, node, "fixed-link");
1306	if (fl_node >= 0) {
1307		/* set phy_addr to invalid value for fixed link */
1308		macb->phy_addr = PHY_MAX_ADDR + 1;
1309		macb->duplex = fdtdec_get_bool(blob, fl_node, "full-duplex");
1310		speed_fdt = fdtdec_get_int(blob, fl_node, "speed", 0);
1311		if (speed_fdt == 100) {
1312			macb->speed = 1;
1313		} else if (speed_fdt == 10) {
1314			macb->speed = 0;
1315		} else {
1316			printf("%s: The given speed %d of ethernet in the DT is not supported\n",
1317					__func__, speed_fdt);
1318			return -EINVAL;
1319		}
1320	}
1321
1322	pdata->iobase = (uintptr_t)dev_remap_addr(dev);
1323	if (!pdata->iobase)
1324		return -EINVAL;
1325
1326	return macb_late_eth_of_to_plat(dev);
1327}
1328
/* SAMA7G5 uses plain values (not MACB/GEM register bits) in USRIO */
static const struct macb_usrio_cfg sama7g5_usrio = {
	.mii = 0,
	.rmii = 1,
	.rgmii = 2,
	.clken = BIT(2),
};

/* SAMA5D4 GEM: shorter DMA bursts than the generic default */
static const struct macb_config sama5d4_config = {
	.dma_burst_length = 4,
	.hw_dma_cap = HW_DMA_CAP_32B,
	.clk_init = NULL,
	.usrio = &macb_default_usrio,
};

/* SiFive FU540 GEM: needs a custom clock-init hook */
static const struct macb_config sifive_config = {
	.dma_burst_length = 16,
	.hw_dma_cap = HW_DMA_CAP_32B,
	.clk_init = macb_sifive_clk_init,
	.usrio = &macb_default_usrio,
};

/* SAMA7G5 gigabit MAC */
static const struct macb_config sama7g5_gmac_config = {
	.dma_burst_length = 16,
	.hw_dma_cap = HW_DMA_CAP_32B,
	.clk_init = macb_sama7g5_clk_init,
	.usrio = &sama7g5_usrio,
};

/* SAMA7G5 10/100 EMAC: USRIO has a clock-enable bit, no clk_init hook */
static const struct macb_config sama7g5_emac_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN,
	.dma_burst_length = 16,
	.hw_dma_cap = HW_DMA_CAP_32B,
	.usrio = &sama7g5_usrio,
};
1363
/*
 * Compatible strings; entries without .data fall back to
 * default_gem_config in macb_eth_probe().
 */
static const struct udevice_id macb_eth_ids[] = {
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,at91sam9260-macb" },
	{ .compatible = "cdns,sam9x60-macb" },
	{ .compatible = "cdns,sama7g5-gem",
	  .data = (ulong)&sama7g5_gmac_config },
	{ .compatible = "cdns,sama7g5-emac",
	  .data = (ulong)&sama7g5_emac_config },
	{ .compatible = "atmel,sama5d2-gem" },
	{ .compatible = "atmel,sama5d3-gem" },
	{ .compatible = "atmel,sama5d4-gem", .data = (ulong)&sama5d4_config },
	{ .compatible = "cdns,zynq-gem" },
	{ .compatible = "sifive,fu540-c000-gem",
	  .data = (ulong)&sifive_config },
	{ }	/* sentinel */
};
1380
/* Driver-model registration for the MACB/GEM ethernet controller */
U_BOOT_DRIVER(eth_macb) = {
	.name	= "eth_macb",
	.id	= UCLASS_ETH,
	.of_match = macb_eth_ids,
	.of_to_plat = macb_eth_of_to_plat,
	.probe	= macb_eth_probe,
	.remove	= macb_eth_remove,
	.ops	= &macb_eth_ops,
	.priv_auto	= sizeof(struct macb_device),
	.plat_auto	= sizeof(struct eth_pdata),
};
1392#endif
1393