1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2018 MediaTek Inc.
4 *
5 * Author: Weijie Gao <weijie.gao@mediatek.com>
6 * Author: Mark Lee <mark-mc.lee@mediatek.com>
7 */
8
9#include <cpu_func.h>
10#include <dm.h>
11#include <log.h>
12#include <malloc.h>
13#include <miiphy.h>
14#include <net.h>
15#include <regmap.h>
16#include <reset.h>
17#include <syscon.h>
18#include <wait_bit.h>
19#include <asm/cache.h>
20#include <asm/gpio.h>
21#include <asm/io.h>
22#include <dm/device_compat.h>
23#include <linux/delay.h>
24#include <linux/err.h>
25#include <linux/ioport.h>
26#include <linux/mdio.h>
27#include <linux/mii.h>
28#include <linux/printk.h>
29
30#include "mtk_eth.h"
31
32#define NUM_TX_DESC		24
33#define NUM_RX_DESC		24
34#define TX_TOTAL_BUF_SIZE	(NUM_TX_DESC * PKTSIZE_ALIGN)
35#define RX_TOTAL_BUF_SIZE	(NUM_RX_DESC * PKTSIZE_ALIGN)
36#define TOTAL_PKT_BUF_SIZE	(TX_TOTAL_BUF_SIZE + RX_TOTAL_BUF_SIZE)
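/*
 * Note: the packet pool provides one PKTSIZE_ALIGN-sized buffer per Tx and
 * per Rx descriptor; mtk_eth_fifo_init() below hands them out sequentially
 * to both rings.
 */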
37
38#define MT753X_NUM_PHYS		5
39#define MT753X_NUM_PORTS	7
40#define MT753X_DFL_SMI_ADDR	31
41#define MT753X_SMI_ADDR_MASK	0x1f
42
43#define MT753X_PHY_ADDR(base, addr) \
44	(((base) + (addr)) & 0x1f)
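/*
 * Note: the masking above wraps the result within the 5-bit MDIO address
 * space, e.g. a PHY base of 30 plus port 3 yields address (30 + 3) & 0x1f = 1.
 */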
45
46#define GDMA_FWD_TO_CPU \
47	(0x20000000 | \
48	GDM_ICS_EN | \
49	GDM_TCS_EN | \
50	GDM_UCS_EN | \
51	STRP_CRC | \
52	(DP_PDMA << MYMAC_DP_S) | \
53	(DP_PDMA << BC_DP_S) | \
54	(DP_PDMA << MC_DP_S) | \
55	(DP_PDMA << UN_DP_S))
56
57#define GDMA_BRIDGE_TO_CPU \
58	(0xC0000000 | \
59	GDM_ICS_EN | \
60	GDM_TCS_EN | \
61	GDM_UCS_EN | \
62	(DP_PDMA << MYMAC_DP_S) | \
63	(DP_PDMA << BC_DP_S) | \
64	(DP_PDMA << MC_DP_S) | \
65	(DP_PDMA << UN_DP_S))
66
67#define GDMA_FWD_DISCARD \
68	(0x20000000 | \
69	GDM_ICS_EN | \
70	GDM_TCS_EN | \
71	GDM_UCS_EN | \
72	STRP_CRC | \
73	(DP_DISCARD << MYMAC_DP_S) | \
74	(DP_DISCARD << BC_DP_S) | \
75	(DP_DISCARD << MC_DP_S) | \
76	(DP_DISCARD << UN_DP_S))
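/*
 * The three GDMA ingress configurations above all enable IP/TCP/UDP checksum
 * offload and differ in where frames go: GDMA_FWD_TO_CPU forwards everything
 * to the PDMA with the CRC stripped, GDMA_BRIDGE_TO_CPU is the variant used
 * with the GSW CPU bridge (no CRC stripping), and GDMA_FWD_DISCARD drops all
 * traffic.
 */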
77
78enum mtk_switch {
79	SW_NONE,
80	SW_MT7530,
81	SW_MT7531,
82	SW_MT7988,
83};
84
/* struct mtk_soc_data -	This is the structure holding all differences
 *				among various platforms
 * @caps:			Flags indicating the extra capabilities of the SoC
 * @ana_rgc3:			Offset of the ANA_RGC3 register within the
 *				sgmiisys syscon
 * @gdma_count:			Number of GDMAs
 * @pdma_base:			Register base of the PDMA block
 * @txd_size:			Tx DMA descriptor size
 * @rxd_size:			Rx DMA descriptor size
 */
95struct mtk_soc_data {
96	u32 caps;
97	u32 ana_rgc3;
98	u32 gdma_count;
99	u32 pdma_base;
100	u32 txd_size;
101	u32 rxd_size;
102};
103
104struct mtk_eth_priv {
105	char pkt_pool[TOTAL_PKT_BUF_SIZE] __aligned(ARCH_DMA_MINALIGN);
106
107	void *tx_ring_noc;
108	void *rx_ring_noc;
109
110	int rx_dma_owner_idx0;
111	int tx_cpu_owner_idx0;
112
113	void __iomem *fe_base;
114	void __iomem *gmac_base;
115	void __iomem *sgmii_base;
116	void __iomem *gsw_base;
117
118	struct regmap *ethsys_regmap;
119
120	struct regmap *infra_regmap;
121
122	struct regmap *usxgmii_regmap;
123	struct regmap *xfi_pextp_regmap;
124	struct regmap *xfi_pll_regmap;
125	struct regmap *toprgu_regmap;
126
127	struct mii_dev *mdio_bus;
128	int (*mii_read)(struct mtk_eth_priv *priv, u8 phy, u8 reg);
129	int (*mii_write)(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 val);
130	int (*mmd_read)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg);
131	int (*mmd_write)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg,
132			 u16 val);
133
134	const struct mtk_soc_data *soc;
135	int gmac_id;
136	int force_mode;
137	int speed;
138	int duplex;
139	int mdc;
140	bool pn_swap;
141
142	struct phy_device *phydev;
143	int phy_interface;
144	int phy_addr;
145
146	enum mtk_switch sw;
147	int (*switch_init)(struct mtk_eth_priv *priv);
148	void (*switch_mac_control)(struct mtk_eth_priv *priv, bool enable);
149	u32 mt753x_smi_addr;
150	u32 mt753x_phy_base;
151	u32 mt753x_pmcr;
152	u32 mt753x_reset_wait_time;
153
154	struct gpio_desc rst_gpio;
155	int mcm;
156
157	struct reset_ctl rst_fe;
158	struct reset_ctl rst_mcm;
159};
160
161static void mtk_pdma_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
162{
163	writel(val, priv->fe_base + priv->soc->pdma_base + reg);
164}
165
166static void mtk_pdma_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
167			 u32 set)
168{
169	clrsetbits_le32(priv->fe_base + priv->soc->pdma_base + reg, clr, set);
170}
171
172static void mtk_gdma_write(struct mtk_eth_priv *priv, int no, u32 reg,
173			   u32 val)
174{
175	u32 gdma_base;
176
177	if (no == 2)
178		gdma_base = GDMA3_BASE;
179	else if (no == 1)
180		gdma_base = GDMA2_BASE;
181	else
182		gdma_base = GDMA1_BASE;
183
184	writel(val, priv->fe_base + gdma_base + reg);
185}
186
187static void mtk_fe_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
188{
189	clrsetbits_le32(priv->fe_base + reg, clr, set);
190}
191
192static u32 mtk_gmac_read(struct mtk_eth_priv *priv, u32 reg)
193{
194	return readl(priv->gmac_base + reg);
195}
196
197static void mtk_gmac_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
198{
199	writel(val, priv->gmac_base + reg);
200}
201
202static void mtk_gmac_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
203{
204	clrsetbits_le32(priv->gmac_base + reg, clr, set);
205}
206
207static void mtk_ethsys_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
208			   u32 set)
209{
210	uint val;
211
212	regmap_read(priv->ethsys_regmap, reg, &val);
213	val &= ~clr;
214	val |= set;
215	regmap_write(priv->ethsys_regmap, reg, val);
216}
217
218static void mtk_infra_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
219			  u32 set)
220{
221	uint val;
222
223	regmap_read(priv->infra_regmap, reg, &val);
224	val &= ~clr;
225	val |= set;
226	regmap_write(priv->infra_regmap, reg, val);
227}
228
229static u32 mtk_gsw_read(struct mtk_eth_priv *priv, u32 reg)
230{
231	return readl(priv->gsw_base + reg);
232}
233
234static void mtk_gsw_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
235{
236	writel(val, priv->gsw_base + reg);
237}
238
239/* Direct MDIO clause 22/45 access via SoC */
240static int mtk_mii_rw(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data,
241		      u32 cmd, u32 st)
242{
243	int ret;
244	u32 val;
245
246	val = (st << MDIO_ST_S) |
247	      ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
248	      (((u32)phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
249	      (((u32)reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);
250
251	if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
252		val |= data & MDIO_RW_DATA_M;
253
254	mtk_gmac_write(priv, GMAC_PIAC_REG, val | PHY_ACS_ST);
255
256	ret = wait_for_bit_le32(priv->gmac_base + GMAC_PIAC_REG,
257				PHY_ACS_ST, 0, 5000, 0);
258	if (ret) {
259		pr_warn("MDIO access timeout\n");
260		return ret;
261	}
262
263	if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
264		val = mtk_gmac_read(priv, GMAC_PIAC_REG);
265		return val & MDIO_RW_DATA_M;
266	}
267
268	return 0;
269}
270
271/* Direct MDIO clause 22 read via SoC */
272static int mtk_mii_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
273{
274	return mtk_mii_rw(priv, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22);
275}
276
277/* Direct MDIO clause 22 write via SoC */
278static int mtk_mii_write(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data)
279{
280	return mtk_mii_rw(priv, phy, reg, data, MDIO_CMD_WRITE, MDIO_ST_C22);
281}
282
283/* Direct MDIO clause 45 read via SoC */
284static int mtk_mmd_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
285{
286	int ret;
287
288	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
289	if (ret)
290		return ret;
291
292	return mtk_mii_rw(priv, addr, devad, 0, MDIO_CMD_READ_C45,
293			  MDIO_ST_C45);
294}
295
296/* Direct MDIO clause 45 write via SoC */
297static int mtk_mmd_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
298			 u16 reg, u16 val)
299{
300	int ret;
301
302	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
303	if (ret)
304		return ret;
305
306	return mtk_mii_rw(priv, addr, devad, val, MDIO_CMD_WRITE,
307			  MDIO_ST_C45);
308}
309
310/* Indirect MDIO clause 45 read via MII registers */
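/*
 * This is the standard Clause 45-over-Clause 22 indirection: write the MMD
 * device address with an ADDR command to the MMD access-control register,
 * write the target register offset to the MMD address/data register, switch
 * the access-control register to DATA mode, then read the value back through
 * the address/data register.
 */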
311static int mtk_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
312			    u16 reg)
313{
314	int ret;
315
316	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
317			      (MMD_ADDR << MMD_CMD_S) |
318			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
319	if (ret)
320		return ret;
321
322	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
323	if (ret)
324		return ret;
325
326	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
327			      (MMD_DATA << MMD_CMD_S) |
328			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
329	if (ret)
330		return ret;
331
332	return priv->mii_read(priv, addr, MII_MMD_ADDR_DATA_REG);
333}
334
335/* Indirect MDIO clause 45 write via MII registers */
336static int mtk_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
337			     u16 reg, u16 val)
338{
339	int ret;
340
341	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
342			      (MMD_ADDR << MMD_CMD_S) |
343			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
344	if (ret)
345		return ret;
346
347	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
348	if (ret)
349		return ret;
350
351	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
352			      (MMD_DATA << MMD_CMD_S) |
353			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
354	if (ret)
355		return ret;
356
357	return priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, val);
358}
359
360/*
361 * MT7530 Internal Register Address Bits
362 * -------------------------------------------------------------------
363 * | 15  14  13  12  11  10   9   8   7   6 | 5   4   3   2 | 1   0  |
364 * |----------------------------------------|---------------|--------|
365 * |              Page Address              |  Reg Address  | Unused |
366 * -------------------------------------------------------------------
367 */
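/*
 * For example, register 0x7ffc is accessed with page address 0x7ffc >> 6 =
 * 0x1ff and register address (0x7ffc >> 2) & 0xf = 0xf, which is exactly how
 * mt753x_reg_read()/mt753x_reg_write() below split the offset.
 */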
368
369static int mt753x_reg_read(struct mtk_eth_priv *priv, u32 reg, u32 *data)
370{
371	int ret, low_word, high_word;
372
373	if (priv->sw == SW_MT7988) {
374		*data = mtk_gsw_read(priv, reg);
375		return 0;
376	}
377
378	/* Write page address */
379	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
380	if (ret)
381		return ret;
382
383	/* Read low word */
384	low_word = mtk_mii_read(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf);
385	if (low_word < 0)
386		return low_word;
387
388	/* Read high word */
389	high_word = mtk_mii_read(priv, priv->mt753x_smi_addr, 0x10);
390	if (high_word < 0)
391		return high_word;
392
393	if (data)
394		*data = ((u32)high_word << 16) | (low_word & 0xffff);
395
396	return 0;
397}
398
399static int mt753x_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 data)
400{
401	int ret;
402
403	if (priv->sw == SW_MT7988) {
404		mtk_gsw_write(priv, reg, data);
405		return 0;
406	}
407
408	/* Write page address */
409	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
410	if (ret)
411		return ret;
412
413	/* Write low word */
414	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf,
415			    data & 0xffff);
416	if (ret)
417		return ret;
418
419	/* Write high word */
420	return mtk_mii_write(priv, priv->mt753x_smi_addr, 0x10, data >> 16);
421}
422
423static void mt753x_reg_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
424			   u32 set)
425{
426	u32 val;
427
428	mt753x_reg_read(priv, reg, &val);
429	val &= ~clr;
430	val |= set;
431	mt753x_reg_write(priv, reg, val);
432}
433
434/* Indirect MDIO clause 22/45 access */
435static int mt7531_mii_rw(struct mtk_eth_priv *priv, int phy, int reg, u16 data,
436			 u32 cmd, u32 st)
437{
438	ulong timeout;
439	u32 val, timeout_ms;
440	int ret = 0;
441
442	val = (st << MDIO_ST_S) |
443	      ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
444	      ((phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
445	      ((reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);
446
447	if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
448		val |= data & MDIO_RW_DATA_M;
449
450	mt753x_reg_write(priv, MT7531_PHY_IAC, val | PHY_ACS_ST);
451
452	timeout_ms = 100;
453	timeout = get_timer(0);
454	while (1) {
455		mt753x_reg_read(priv, MT7531_PHY_IAC, &val);
456
457		if ((val & PHY_ACS_ST) == 0)
458			break;
459
460		if (get_timer(timeout) > timeout_ms)
461			return -ETIMEDOUT;
462	}
463
464	if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
465		mt753x_reg_read(priv, MT7531_PHY_IAC, &val);
466		ret = val & MDIO_RW_DATA_M;
467	}
468
469	return ret;
470}
471
472static int mt7531_mii_ind_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
473{
474	u8 phy_addr;
475
476	if (phy >= MT753X_NUM_PHYS)
477		return -EINVAL;
478
479	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);
480
481	return mt7531_mii_rw(priv, phy_addr, reg, 0, MDIO_CMD_READ,
482			     MDIO_ST_C22);
483}
484
485static int mt7531_mii_ind_write(struct mtk_eth_priv *priv, u8 phy, u8 reg,
486				u16 val)
487{
488	u8 phy_addr;
489
490	if (phy >= MT753X_NUM_PHYS)
491		return -EINVAL;
492
493	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);
494
495	return mt7531_mii_rw(priv, phy_addr, reg, val, MDIO_CMD_WRITE,
496			     MDIO_ST_C22);
497}
498
499static int mt7531_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
500			       u16 reg)
501{
502	u8 phy_addr;
503	int ret;
504
505	if (addr >= MT753X_NUM_PHYS)
506		return -EINVAL;
507
508	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);
509
510	ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
511			    MDIO_ST_C45);
512	if (ret)
513		return ret;
514
515	return mt7531_mii_rw(priv, phy_addr, devad, 0, MDIO_CMD_READ_C45,
516			     MDIO_ST_C45);
517}
518
519static int mt7531_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
520				u16 reg, u16 val)
521{
522	u8 phy_addr;
523	int ret;
524
525	if (addr >= MT753X_NUM_PHYS)
526		return 0;
527
528	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);
529
530	ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
531			    MDIO_ST_C45);
532	if (ret)
533		return ret;
534
535	return mt7531_mii_rw(priv, phy_addr, devad, val, MDIO_CMD_WRITE,
536			     MDIO_ST_C45);
537}
538
539static int mtk_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
540{
541	struct mtk_eth_priv *priv = bus->priv;
542
543	if (devad < 0)
544		return priv->mii_read(priv, addr, reg);
545	else
546		return priv->mmd_read(priv, addr, devad, reg);
547}
548
549static int mtk_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
550			  u16 val)
551{
552	struct mtk_eth_priv *priv = bus->priv;
553
554	if (devad < 0)
555		return priv->mii_write(priv, addr, reg, val);
556	else
557		return priv->mmd_write(priv, addr, devad, reg, val);
558}
559
560static int mtk_mdio_register(struct udevice *dev)
561{
562	struct mtk_eth_priv *priv = dev_get_priv(dev);
563	struct mii_dev *mdio_bus = mdio_alloc();
564	int ret;
565
566	if (!mdio_bus)
567		return -ENOMEM;
568
569	/* Assign MDIO access APIs according to the switch/phy */
570	switch (priv->sw) {
571	case SW_MT7530:
572		priv->mii_read = mtk_mii_read;
573		priv->mii_write = mtk_mii_write;
574		priv->mmd_read = mtk_mmd_ind_read;
575		priv->mmd_write = mtk_mmd_ind_write;
576		break;
577	case SW_MT7531:
578	case SW_MT7988:
579		priv->mii_read = mt7531_mii_ind_read;
580		priv->mii_write = mt7531_mii_ind_write;
581		priv->mmd_read = mt7531_mmd_ind_read;
582		priv->mmd_write = mt7531_mmd_ind_write;
583		break;
584	default:
585		priv->mii_read = mtk_mii_read;
586		priv->mii_write = mtk_mii_write;
587		priv->mmd_read = mtk_mmd_read;
588		priv->mmd_write = mtk_mmd_write;
589	}
590
591	mdio_bus->read = mtk_mdio_read;
592	mdio_bus->write = mtk_mdio_write;
	snprintf(mdio_bus->name, sizeof(mdio_bus->name), "%s", dev->name);
594
595	mdio_bus->priv = (void *)priv;
596
597	ret = mdio_register(mdio_bus);
598
599	if (ret)
600		return ret;
601
602	priv->mdio_bus = mdio_bus;
603
604	return 0;
605}
606
607static int mt753x_core_reg_read(struct mtk_eth_priv *priv, u32 reg)
608{
609	u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);
610
611	return priv->mmd_read(priv, phy_addr, 0x1f, reg);
612}
613
614static void mt753x_core_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
615{
616	u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);
617
618	priv->mmd_write(priv, phy_addr, 0x1f, reg, val);
619}
620
621static int mt7530_pad_clk_setup(struct mtk_eth_priv *priv, int mode)
622{
623	u32 ncpo1, ssc_delta;
624
625	switch (mode) {
626	case PHY_INTERFACE_MODE_RGMII:
627		ncpo1 = 0x0c80;
628		ssc_delta = 0x87;
629		break;
630	default:
631		printf("error: xMII mode %d not supported\n", mode);
632		return -EINVAL;
633	}
634
635	/* Disable MT7530 core clock */
636	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, 0);
637
638	/* Disable MT7530 PLL */
639	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
640			      (2 << RG_GSWPLL_POSDIV_200M_S) |
641			      (32 << RG_GSWPLL_FBKDIV_200M_S));
642
	/* Set MT7530 core clock to 500MHz */
644	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP2,
645			      (1 << RG_GSWPLL_POSDIV_500M_S) |
646			      (25 << RG_GSWPLL_FBKDIV_500M_S));
647
648	/* Enable MT7530 PLL */
649	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
650			      (2 << RG_GSWPLL_POSDIV_200M_S) |
651			      (32 << RG_GSWPLL_FBKDIV_200M_S) |
652			      RG_GSWPLL_EN_PRE);
653
654	udelay(20);
655
656	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
657
658	/* Setup the MT7530 TRGMII Tx Clock */
659	mt753x_core_reg_write(priv, CORE_PLL_GROUP5, ncpo1);
660	mt753x_core_reg_write(priv, CORE_PLL_GROUP6, 0);
661	mt753x_core_reg_write(priv, CORE_PLL_GROUP10, ssc_delta);
662	mt753x_core_reg_write(priv, CORE_PLL_GROUP11, ssc_delta);
663	mt753x_core_reg_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
664			      RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);
665
666	mt753x_core_reg_write(priv, CORE_PLL_GROUP2,
667			      RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
668			      (1 << RG_SYSPLL_POSDIV_S));
669
670	mt753x_core_reg_write(priv, CORE_PLL_GROUP7,
671			      RG_LCDDS_PCW_NCPO_CHG | (3 << RG_LCCDS_C_S) |
672			      RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
673
674	/* Enable MT7530 core clock */
675	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG,
676			      REG_GSWCK_EN | REG_TRGMIICK_EN);
677
678	return 0;
679}
680
681static void mt7530_mac_control(struct mtk_eth_priv *priv, bool enable)
682{
683	u32 pmcr = FORCE_MODE;
684
685	if (enable)
686		pmcr = priv->mt753x_pmcr;
687
688	mt753x_reg_write(priv, PMCR_REG(6), pmcr);
689}
690
691static int mt7530_setup(struct mtk_eth_priv *priv)
692{
693	u16 phy_addr, phy_val;
694	u32 val, txdrv;
695	int i;
696
697	if (!MTK_HAS_CAPS(priv->soc->caps, MTK_TRGMII_MT7621_CLK)) {
698		/* Select 250MHz clk for RGMII mode */
699		mtk_ethsys_rmw(priv, ETHSYS_CLKCFG0_REG,
700			       ETHSYS_TRGMII_CLK_SEL362_5, 0);
701
702		txdrv = 8;
703	} else {
704		txdrv = 4;
705	}
706
707	/* Modify HWTRAP first to allow direct access to internal PHYs */
708	mt753x_reg_read(priv, HWTRAP_REG, &val);
709	val |= CHG_TRAP;
710	val &= ~C_MDIO_BPS;
711	mt753x_reg_write(priv, MHWTRAP_REG, val);
712
713	/* Calculate the phy base address */
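	/*
	 * The trapped SMI address selects a block of eight MDIO addresses;
	 * the internal PHY base is placed right after that block
	 * ((val | 0x7) + 1), and MT753X_PHY_ADDR() wraps it back into the
	 * 5-bit MDIO address space.
	 */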
714	val = ((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3;
715	priv->mt753x_phy_base = (val | 0x7) + 1;
716
717	/* Turn off PHYs */
718	for (i = 0; i < MT753X_NUM_PHYS; i++) {
719		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
720		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
721		phy_val |= BMCR_PDOWN;
722		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
723	}
724
725	/* Force MAC link down before reset */
726	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);
727	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE);
728
729	/* MT7530 reset */
730	mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
731	udelay(100);
732
733	val = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
734	      MAC_MODE | FORCE_MODE |
735	      MAC_TX_EN | MAC_RX_EN |
736	      BKOFF_EN | BACKPR_EN |
737	      (SPEED_1000M << FORCE_SPD_S) |
738	      FORCE_DPX | FORCE_LINK;
739
740	/* MT7530 Port6: Forced 1000M/FD, FC disabled */
741	priv->mt753x_pmcr = val;
742
743	/* MT7530 Port5: Forced link down */
744	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);
745
746	/* Keep MAC link down before starting eth */
747	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE);
748
749	/* MT7530 Port6: Set to RGMII */
750	mt753x_reg_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_M, P6_INTF_MODE_RGMII);
751
752	/* Hardware Trap: Enable Port6, Disable Port5 */
753	mt753x_reg_read(priv, HWTRAP_REG, &val);
754	val |= CHG_TRAP | LOOPDET_DIS | P5_INTF_DIS |
755	       (P5_INTF_SEL_GMAC5 << P5_INTF_SEL_S) |
756	       (P5_INTF_MODE_RGMII << P5_INTF_MODE_S);
757	val &= ~(C_MDIO_BPS | P6_INTF_DIS);
758	mt753x_reg_write(priv, MHWTRAP_REG, val);
759
760	/* Setup switch core pll */
761	mt7530_pad_clk_setup(priv, priv->phy_interface);
762
763	/* Lower Tx Driving for TRGMII path */
	for (i = 0; i < NUM_TRGMII_CTRL; i++)
765		mt753x_reg_write(priv, MT7530_TRGMII_TD_ODT(i),
766				 (txdrv << TD_DM_DRVP_S) |
767				 (txdrv << TD_DM_DRVN_S));
768
	for (i = 0; i < NUM_TRGMII_CTRL; i++)
770		mt753x_reg_rmw(priv, MT7530_TRGMII_RD(i), RD_TAP_M, 16);
771
772	/* Turn on PHYs */
773	for (i = 0; i < MT753X_NUM_PHYS; i++) {
774		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
775		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
776		phy_val &= ~BMCR_PDOWN;
777		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
778	}
779
780	return 0;
781}
782
783static void mt7531_core_pll_setup(struct mtk_eth_priv *priv, int mcm)
784{
785	/* Step 1 : Disable MT7531 COREPLL */
786	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, 0);
787
788	/* Step 2: switch to XTAL output */
789	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_CLKSW, SW_CLKSW);
790
791	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, 0);
792
793	/* Step 3: disable PLLGP and enable program PLLGP */
794	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_PLLGP, SW_PLLGP);
795
796	/* Step 4: program COREPLL output frequency to 500MHz */
797	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_POSDIV_M,
798		       2 << RG_COREPLL_POSDIV_S);
799	udelay(25);
800
	/* Currently only a 25MHz XTAL is supported */
802	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_M,
803		       0x140000 << RG_COREPLL_SDM_PCW_S);
804
805	/* Set feedback divide ratio update signal to high */
806	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG,
807		       RG_COREPLL_SDM_PCW_CHG);
808
809	/* Wait for at least 16 XTAL clocks */
810	udelay(10);
811
812	/* Step 5: set feedback divide ratio update signal to low */
813	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG, 0);
814
	/* Enable 325MHz clock for SGMII */
	mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);

	/* Enable 250MHz SSC clock for RGMII */
	mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);

	/* Step 6: Enable MT7531 PLL */
822	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, RG_COREPLL_EN);
823
824	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, EN_COREPLL);
825
826	udelay(25);
827}
828
829static int mt7531_port_sgmii_init(struct mtk_eth_priv *priv,
830				  u32 port)
831{
832	if (port != 5 && port != 6) {
833		printf("mt7531: port %d is not a SGMII port\n", port);
834		return -EINVAL;
835	}
836
837	/* Set SGMII GEN2 speed(2.5G) */
838	mt753x_reg_rmw(priv, MT7531_PHYA_CTRL_SIGNAL3(port),
839		       SGMSYS_SPEED_2500, SGMSYS_SPEED_2500);
840
841	/* Disable SGMII AN */
842	mt753x_reg_rmw(priv, MT7531_PCS_CONTROL_1(port),
843		       SGMII_AN_ENABLE, 0);
844
845	/* SGMII force mode setting */
846	mt753x_reg_write(priv, MT7531_SGMII_MODE(port), SGMII_FORCE_MODE);
847
848	/* Release PHYA power down state */
849	mt753x_reg_rmw(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
850		       SGMII_PHYA_PWD, 0);
851
852	return 0;
853}
854
855static int mt7531_port_rgmii_init(struct mtk_eth_priv *priv, u32 port)
856{
857	u32 val;
858
859	if (port != 5) {
860		printf("error: RGMII mode is not available for port %d\n",
861		       port);
862		return -EINVAL;
863	}
864
865	mt753x_reg_read(priv, MT7531_CLKGEN_CTRL, &val);
866	val |= GP_CLK_EN;
867	val &= ~GP_MODE_M;
868	val |= GP_MODE_RGMII << GP_MODE_S;
869	val |= TXCLK_NO_REVERSE;
870	val |= RXCLK_NO_DELAY;
871	val &= ~CLK_SKEW_IN_M;
872	val |= CLK_SKEW_IN_NO_CHANGE << CLK_SKEW_IN_S;
873	val &= ~CLK_SKEW_OUT_M;
874	val |= CLK_SKEW_OUT_NO_CHANGE << CLK_SKEW_OUT_S;
875	mt753x_reg_write(priv, MT7531_CLKGEN_CTRL, val);
876
877	return 0;
878}
879
880static void mt7531_phy_setting(struct mtk_eth_priv *priv)
881{
882	int i;
883	u32 val;
884
885	for (i = 0; i < MT753X_NUM_PHYS; i++) {
886		/* Enable HW auto downshift */
887		priv->mii_write(priv, i, 0x1f, 0x1);
888		val = priv->mii_read(priv, i, PHY_EXT_REG_14);
889		val |= PHY_EN_DOWN_SHFIT;
890		priv->mii_write(priv, i, PHY_EXT_REG_14, val);
891
892		/* PHY link down power saving enable */
893		val = priv->mii_read(priv, i, PHY_EXT_REG_17);
894		val |= PHY_LINKDOWN_POWER_SAVING_EN;
895		priv->mii_write(priv, i, PHY_EXT_REG_17, val);
896
897		val = priv->mmd_read(priv, i, 0x1e, PHY_DEV1E_REG_0C6);
898		val &= ~PHY_POWER_SAVING_M;
899		val |= PHY_POWER_SAVING_TX << PHY_POWER_SAVING_S;
900		priv->mmd_write(priv, i, 0x1e, PHY_DEV1E_REG_0C6, val);
901	}
902}
903
904static void mt7531_mac_control(struct mtk_eth_priv *priv, bool enable)
905{
906	u32 pmcr = FORCE_MODE_LNK;
907
908	if (enable)
909		pmcr = priv->mt753x_pmcr;
910
911	mt753x_reg_write(priv, PMCR_REG(5), pmcr);
912	mt753x_reg_write(priv, PMCR_REG(6), pmcr);
913}
914
915static int mt7531_setup(struct mtk_eth_priv *priv)
916{
917	u16 phy_addr, phy_val;
918	u32 val;
919	u32 pmcr;
920	u32 port5_sgmii;
921	int i;
922
923	priv->mt753x_phy_base = (priv->mt753x_smi_addr + 1) &
924				MT753X_SMI_ADDR_MASK;
925
926	/* Turn off PHYs */
927	for (i = 0; i < MT753X_NUM_PHYS; i++) {
928		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
929		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
930		phy_val |= BMCR_PDOWN;
931		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
932	}
933
934	/* Force MAC link down before reset */
935	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE_LNK);
936	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);
937
938	/* Switch soft reset */
939	mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
940	udelay(100);
941
942	/* Enable MDC input Schmitt Trigger */
943	mt753x_reg_rmw(priv, MT7531_SMT0_IOLB, SMT_IOLB_5_SMI_MDC_EN,
944		       SMT_IOLB_5_SMI_MDC_EN);
945
946	mt7531_core_pll_setup(priv, priv->mcm);
947
948	mt753x_reg_read(priv, MT7531_TOP_SIG_SR, &val);
949	port5_sgmii = !!(val & PAD_DUAL_SGMII_EN);
950
	/* Port 5 supports either RGMII or SGMII; port 6 supports SGMII only. */
952	switch (priv->phy_interface) {
953	case PHY_INTERFACE_MODE_RGMII:
954		if (!port5_sgmii)
955			mt7531_port_rgmii_init(priv, 5);
956		break;
957	case PHY_INTERFACE_MODE_2500BASEX:
958		mt7531_port_sgmii_init(priv, 6);
959		if (port5_sgmii)
960			mt7531_port_sgmii_init(priv, 5);
961		break;
962	default:
963		break;
964	}
965
966	pmcr = MT7531_FORCE_MODE |
967	       (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
968	       MAC_MODE | MAC_TX_EN | MAC_RX_EN |
969	       BKOFF_EN | BACKPR_EN |
970	       FORCE_RX_FC | FORCE_TX_FC |
971	       (SPEED_1000M << FORCE_SPD_S) | FORCE_DPX |
972	       FORCE_LINK;
973
974	priv->mt753x_pmcr = pmcr;
975
976	/* Keep MAC link down before starting eth */
977	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE_LNK);
978	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);
979
980	/* Turn on PHYs */
981	for (i = 0; i < MT753X_NUM_PHYS; i++) {
982		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
983		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
984		phy_val &= ~BMCR_PDOWN;
985		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
986	}
987
988	mt7531_phy_setting(priv);
989
990	/* Enable Internal PHYs */
991	val = mt753x_core_reg_read(priv, CORE_PLL_GROUP4);
992	val |= MT7531_BYPASS_MODE;
993	val &= ~MT7531_POWER_ON_OFF;
994	mt753x_core_reg_write(priv, CORE_PLL_GROUP4, val);
995
996	return 0;
997}
998
999static void mt7988_phy_setting(struct mtk_eth_priv *priv)
1000{
1001	u16 val;
1002	u32 i;
1003
1004	for (i = 0; i < MT753X_NUM_PHYS; i++) {
1005		/* Enable HW auto downshift */
1006		priv->mii_write(priv, i, 0x1f, 0x1);
1007		val = priv->mii_read(priv, i, PHY_EXT_REG_14);
1008		val |= PHY_EN_DOWN_SHFIT;
1009		priv->mii_write(priv, i, PHY_EXT_REG_14, val);
1010
1011		/* PHY link down power saving enable */
1012		val = priv->mii_read(priv, i, PHY_EXT_REG_17);
1013		val |= PHY_LINKDOWN_POWER_SAVING_EN;
1014		priv->mii_write(priv, i, PHY_EXT_REG_17, val);
1015	}
1016}
1017
1018static void mt7988_mac_control(struct mtk_eth_priv *priv, bool enable)
1019{
1020	u32 pmcr = FORCE_MODE_LNK;
1021
1022	if (enable)
1023		pmcr = priv->mt753x_pmcr;
1024
1025	mt753x_reg_write(priv, PMCR_REG(6), pmcr);
1026}
1027
1028static int mt7988_setup(struct mtk_eth_priv *priv)
1029{
1030	u16 phy_addr, phy_val;
1031	u32 pmcr;
1032	int i;
1033
1034	priv->gsw_base = regmap_get_range(priv->ethsys_regmap, 0) + GSW_BASE;
1035
1036	priv->mt753x_phy_base = (priv->mt753x_smi_addr + 1) &
1037				MT753X_SMI_ADDR_MASK;
1038
1039	/* Turn off PHYs */
1040	for (i = 0; i < MT753X_NUM_PHYS; i++) {
1041		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
1042		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
1043		phy_val |= BMCR_PDOWN;
1044		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
1045	}
1046
1047	switch (priv->phy_interface) {
1048	case PHY_INTERFACE_MODE_USXGMII:
1049		/* Use CPU bridge instead of actual USXGMII path */
1050
1051		/* Set GDM1 no drop */
1052		mtk_fe_rmw(priv, PSE_NO_DROP_CFG_REG, 0, PSE_NO_DROP_GDM1);
1053
1054		/* Enable GDM1 to GSW CPU bridge */
1055		mtk_gmac_rmw(priv, GMAC_MAC_MISC_REG, 0, BIT(0));
1056
1057		/* XGMAC force link up */
1058		mtk_gmac_rmw(priv, GMAC_XGMAC_STS_REG, 0, P1_XGMAC_FORCE_LINK);
1059
1060		/* Setup GSW CPU bridge IPG */
1061		mtk_gmac_rmw(priv, GMAC_GSW_CFG_REG, GSWTX_IPG_M | GSWRX_IPG_M,
1062			     (0xB << GSWTX_IPG_S) | (0xB << GSWRX_IPG_S));
1063		break;
1064	default:
1065		printf("Error: MT7988 GSW does not support %s interface\n",
1066		       phy_string_for_interface(priv->phy_interface));
1067		break;
1068	}
1069
1070	pmcr = MT7988_FORCE_MODE |
1071	       (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
1072	       MAC_MODE | MAC_TX_EN | MAC_RX_EN |
1073	       BKOFF_EN | BACKPR_EN |
1074	       FORCE_RX_FC | FORCE_TX_FC |
1075	       (SPEED_1000M << FORCE_SPD_S) | FORCE_DPX |
1076	       FORCE_LINK;
1077
1078	priv->mt753x_pmcr = pmcr;
1079
1080	/* Keep MAC link down before starting eth */
1081	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);
1082
1083	/* Turn on PHYs */
1084	for (i = 0; i < MT753X_NUM_PHYS; i++) {
1085		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
1086		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
1087		phy_val &= ~BMCR_PDOWN;
1088		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
1089	}
1090
1091	mt7988_phy_setting(priv);
1092
1093	return 0;
1094}
1095
1096static int mt753x_switch_init(struct mtk_eth_priv *priv)
1097{
1098	int ret;
1099	int i;
1100
1101	/* Global reset switch */
1102	if (priv->mcm) {
1103		reset_assert(&priv->rst_mcm);
1104		udelay(1000);
1105		reset_deassert(&priv->rst_mcm);
1106		mdelay(priv->mt753x_reset_wait_time);
1107	} else if (dm_gpio_is_valid(&priv->rst_gpio)) {
1108		dm_gpio_set_value(&priv->rst_gpio, 0);
1109		udelay(1000);
1110		dm_gpio_set_value(&priv->rst_gpio, 1);
1111		mdelay(priv->mt753x_reset_wait_time);
1112	}
1113
1114	ret = priv->switch_init(priv);
1115	if (ret)
1116		return ret;
1117
1118	/* Set port isolation */
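	/*
	 * Port matrix: user ports 0-5 may only forward to port 6 (the CPU
	 * port, matrix 0x40), while port 6 may forward to all user ports
	 * (matrix 0x3f). This keeps the user ports isolated from each other.
	 */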
1119	for (i = 0; i < MT753X_NUM_PORTS; i++) {
1120		/* Set port matrix mode */
1121		if (i != 6)
1122			mt753x_reg_write(priv, PCR_REG(i),
1123					 (0x40 << PORT_MATRIX_S));
1124		else
1125			mt753x_reg_write(priv, PCR_REG(i),
1126					 (0x3f << PORT_MATRIX_S));
1127
1128		/* Set port mode to user port */
1129		mt753x_reg_write(priv, PVC_REG(i),
1130				 (0x8100 << STAG_VPID_S) |
1131				 (VLAN_ATTR_USER << VLAN_ATTR_S));
1132	}
1133
1134	return 0;
1135}
1136
1137static void mtk_xphy_link_adjust(struct mtk_eth_priv *priv)
1138{
1139	u16 lcl_adv = 0, rmt_adv = 0;
1140	u8 flowctrl;
1141	u32 mcr;
1142
1143	mcr = mtk_gmac_read(priv, XGMAC_PORT_MCR(priv->gmac_id));
1144	mcr &= ~(XGMAC_FORCE_TX_FC | XGMAC_FORCE_RX_FC);
1145
1146	if (priv->phydev->duplex) {
1147		if (priv->phydev->pause)
1148			rmt_adv = LPA_PAUSE_CAP;
1149		if (priv->phydev->asym_pause)
1150			rmt_adv |= LPA_PAUSE_ASYM;
1151
1152		if (priv->phydev->advertising & ADVERTISED_Pause)
1153			lcl_adv |= ADVERTISE_PAUSE_CAP;
1154		if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
1155			lcl_adv |= ADVERTISE_PAUSE_ASYM;
1156
1157		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
1158
1159		if (flowctrl & FLOW_CTRL_TX)
1160			mcr |= XGMAC_FORCE_TX_FC;
1161		if (flowctrl & FLOW_CTRL_RX)
1162			mcr |= XGMAC_FORCE_RX_FC;
1163
1164		debug("rx pause %s, tx pause %s\n",
1165		      flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
1166		      flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
1167	}
1168
1169	mcr &= ~(XGMAC_TRX_DISABLE);
1170	mtk_gmac_write(priv, XGMAC_PORT_MCR(priv->gmac_id), mcr);
1171}
1172
1173static void mtk_phy_link_adjust(struct mtk_eth_priv *priv)
1174{
1175	u16 lcl_adv = 0, rmt_adv = 0;
1176	u8 flowctrl;
1177	u32 mcr;
1178
1179	mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
1180	      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
1181	      MAC_MODE | FORCE_MODE |
1182	      MAC_TX_EN | MAC_RX_EN |
1183	      DEL_RXFIFO_CLR |
1184	      BKOFF_EN | BACKPR_EN;
1185
1186	switch (priv->phydev->speed) {
1187	case SPEED_10:
1188		mcr |= (SPEED_10M << FORCE_SPD_S);
1189		break;
1190	case SPEED_100:
1191		mcr |= (SPEED_100M << FORCE_SPD_S);
1192		break;
1193	case SPEED_1000:
1194	case SPEED_2500:
1195		mcr |= (SPEED_1000M << FORCE_SPD_S);
1196		break;
	}
1198
1199	if (priv->phydev->link)
1200		mcr |= FORCE_LINK;
1201
1202	if (priv->phydev->duplex) {
1203		mcr |= FORCE_DPX;
1204
1205		if (priv->phydev->pause)
1206			rmt_adv = LPA_PAUSE_CAP;
1207		if (priv->phydev->asym_pause)
1208			rmt_adv |= LPA_PAUSE_ASYM;
1209
1210		if (priv->phydev->advertising & ADVERTISED_Pause)
1211			lcl_adv |= ADVERTISE_PAUSE_CAP;
1212		if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
1213			lcl_adv |= ADVERTISE_PAUSE_ASYM;
1214
1215		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
1216
1217		if (flowctrl & FLOW_CTRL_TX)
1218			mcr |= FORCE_TX_FC;
1219		if (flowctrl & FLOW_CTRL_RX)
1220			mcr |= FORCE_RX_FC;
1221
1222		debug("rx pause %s, tx pause %s\n",
1223		      flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
1224		      flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
1225	}
1226
1227	mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
1228}
1229
1230static int mtk_phy_start(struct mtk_eth_priv *priv)
1231{
1232	struct phy_device *phydev = priv->phydev;
1233	int ret;
1234
1235	ret = phy_startup(phydev);
1236
1237	if (ret) {
1238		debug("Could not initialize PHY %s\n", phydev->dev->name);
1239		return ret;
1240	}
1241
1242	if (!phydev->link) {
1243		debug("%s: link down.\n", phydev->dev->name);
1244		return 0;
1245	}
1246
1247	if (!priv->force_mode) {
1248		if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII ||
1249		    priv->phy_interface == PHY_INTERFACE_MODE_XGMII)
1250			mtk_xphy_link_adjust(priv);
1251		else
1252			mtk_phy_link_adjust(priv);
1253	}
1254
1255	debug("Speed: %d, %s duplex%s\n", phydev->speed,
1256	      (phydev->duplex) ? "full" : "half",
1257	      (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");
1258
1259	return 0;
1260}
1261
1262static int mtk_phy_probe(struct udevice *dev)
1263{
1264	struct mtk_eth_priv *priv = dev_get_priv(dev);
1265	struct phy_device *phydev;
1266
1267	phydev = phy_connect(priv->mdio_bus, priv->phy_addr, dev,
1268			     priv->phy_interface);
1269	if (!phydev)
1270		return -ENODEV;
1271
1272	phydev->supported &= PHY_GBIT_FEATURES;
1273	phydev->advertising = phydev->supported;
1274
1275	priv->phydev = phydev;
1276	phy_config(phydev);
1277
1278	return 0;
1279}
1280
1281static void mtk_sgmii_an_init(struct mtk_eth_priv *priv)
1282{
1283	/* Set SGMII GEN1 speed(1G) */
1284	clrsetbits_le32(priv->sgmii_base + priv->soc->ana_rgc3,
1285			SGMSYS_SPEED_2500, 0);
1286
1287	/* Enable SGMII AN */
1288	setbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
1289		     SGMII_AN_ENABLE);
1290
1291	/* SGMII AN mode setting */
1292	writel(SGMII_AN_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);
1293
1294	/* SGMII PN SWAP setting */
1295	if (priv->pn_swap) {
1296		setbits_le32(priv->sgmii_base + SGMSYS_QPHY_WRAP_CTRL,
1297			     SGMII_PN_SWAP_TX_RX);
1298	}
1299
1300	/* Release PHYA power down state */
1301	clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
1302			SGMII_PHYA_PWD, 0);
1303}
1304
1305static void mtk_sgmii_force_init(struct mtk_eth_priv *priv)
1306{
1307	/* Set SGMII GEN2 speed(2.5G) */
1308	setbits_le32(priv->sgmii_base + priv->soc->ana_rgc3,
1309		     SGMSYS_SPEED_2500);
1310
1311	/* Disable SGMII AN */
1312	clrsetbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
1313			SGMII_AN_ENABLE, 0);
1314
1315	/* SGMII force mode setting */
1316	writel(SGMII_FORCE_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);
1317
1318	/* SGMII PN SWAP setting */
1319	if (priv->pn_swap) {
1320		setbits_le32(priv->sgmii_base + SGMSYS_QPHY_WRAP_CTRL,
1321			     SGMII_PN_SWAP_TX_RX);
1322	}
1323
1324	/* Release PHYA power down state */
1325	clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
1326			SGMII_PHYA_PWD, 0);
1327}
1328
1329static void mtk_xfi_pll_enable(struct mtk_eth_priv *priv)
1330{
1331	u32 val = 0;
1332
1333	/* Add software workaround for USXGMII PLL TCL issue */
1334	regmap_write(priv->xfi_pll_regmap, XFI_PLL_ANA_GLB8,
1335		     RG_XFI_PLL_ANA_SWWA);
1336
1337	regmap_read(priv->xfi_pll_regmap, XFI_PLL_DIG_GLB8, &val);
1338	val |= RG_XFI_PLL_EN;
1339	regmap_write(priv->xfi_pll_regmap, XFI_PLL_DIG_GLB8, val);
1340}
1341
1342static void mtk_usxgmii_reset(struct mtk_eth_priv *priv)
1343{
1344	switch (priv->gmac_id) {
1345	case 1:
1346		regmap_write(priv->toprgu_regmap, 0xFC, 0x0000A004);
1347		regmap_write(priv->toprgu_regmap, 0x18, 0x88F0A004);
1348		regmap_write(priv->toprgu_regmap, 0xFC, 0x00000000);
1349		regmap_write(priv->toprgu_regmap, 0x18, 0x88F00000);
1350		regmap_write(priv->toprgu_regmap, 0x18, 0x00F00000);
1351		break;
1352	case 2:
1353		regmap_write(priv->toprgu_regmap, 0xFC, 0x00005002);
1354		regmap_write(priv->toprgu_regmap, 0x18, 0x88F05002);
1355		regmap_write(priv->toprgu_regmap, 0xFC, 0x00000000);
1356		regmap_write(priv->toprgu_regmap, 0x18, 0x88F00000);
1357		regmap_write(priv->toprgu_regmap, 0x18, 0x00F00000);
1358		break;
1359	}
1360
1361	mdelay(10);
1362}
1363
1364static void mtk_usxgmii_setup_phya_an_10000(struct mtk_eth_priv *priv)
1365{
1366	regmap_write(priv->usxgmii_regmap, 0x810, 0x000FFE6D);
1367	regmap_write(priv->usxgmii_regmap, 0x818, 0x07B1EC7B);
1368	regmap_write(priv->usxgmii_regmap, 0x80C, 0x30000000);
1369	ndelay(1020);
1370	regmap_write(priv->usxgmii_regmap, 0x80C, 0x10000000);
1371	ndelay(1020);
1372	regmap_write(priv->usxgmii_regmap, 0x80C, 0x00000000);
1373
1374	regmap_write(priv->xfi_pextp_regmap, 0x9024, 0x00C9071C);
1375	regmap_write(priv->xfi_pextp_regmap, 0x2020, 0xAA8585AA);
1376	regmap_write(priv->xfi_pextp_regmap, 0x2030, 0x0C020707);
1377	regmap_write(priv->xfi_pextp_regmap, 0x2034, 0x0E050F0F);
1378	regmap_write(priv->xfi_pextp_regmap, 0x2040, 0x00140032);
1379	regmap_write(priv->xfi_pextp_regmap, 0x50F0, 0x00C014AA);
1380	regmap_write(priv->xfi_pextp_regmap, 0x50E0, 0x3777C12B);
1381	regmap_write(priv->xfi_pextp_regmap, 0x506C, 0x005F9CFF);
1382	regmap_write(priv->xfi_pextp_regmap, 0x5070, 0x9D9DFAFA);
1383	regmap_write(priv->xfi_pextp_regmap, 0x5074, 0x27273F3F);
1384	regmap_write(priv->xfi_pextp_regmap, 0x5078, 0xA7883C68);
1385	regmap_write(priv->xfi_pextp_regmap, 0x507C, 0x11661166);
1386	regmap_write(priv->xfi_pextp_regmap, 0x5080, 0x0E000AAF);
1387	regmap_write(priv->xfi_pextp_regmap, 0x5084, 0x08080D0D);
1388	regmap_write(priv->xfi_pextp_regmap, 0x5088, 0x02030909);
1389	regmap_write(priv->xfi_pextp_regmap, 0x50E4, 0x0C0C0000);
1390	regmap_write(priv->xfi_pextp_regmap, 0x50E8, 0x04040000);
1391	regmap_write(priv->xfi_pextp_regmap, 0x50EC, 0x0F0F0C06);
1392	regmap_write(priv->xfi_pextp_regmap, 0x50A8, 0x506E8C8C);
1393	regmap_write(priv->xfi_pextp_regmap, 0x6004, 0x18190000);
1394	regmap_write(priv->xfi_pextp_regmap, 0x00F8, 0x01423342);
1395	regmap_write(priv->xfi_pextp_regmap, 0x00F4, 0x80201F20);
1396	regmap_write(priv->xfi_pextp_regmap, 0x0030, 0x00050C00);
1397	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x02002800);
1398	ndelay(1020);
1399	regmap_write(priv->xfi_pextp_regmap, 0x30B0, 0x00000020);
1400	regmap_write(priv->xfi_pextp_regmap, 0x3028, 0x00008A01);
1401	regmap_write(priv->xfi_pextp_regmap, 0x302C, 0x0000A884);
1402	regmap_write(priv->xfi_pextp_regmap, 0x3024, 0x00083002);
1403	regmap_write(priv->xfi_pextp_regmap, 0x3010, 0x00022220);
1404	regmap_write(priv->xfi_pextp_regmap, 0x5064, 0x0F020A01);
1405	regmap_write(priv->xfi_pextp_regmap, 0x50B4, 0x06100600);
1406	regmap_write(priv->xfi_pextp_regmap, 0x3048, 0x40704000);
1407	regmap_write(priv->xfi_pextp_regmap, 0x3050, 0xA8000000);
1408	regmap_write(priv->xfi_pextp_regmap, 0x3054, 0x000000AA);
1409	regmap_write(priv->xfi_pextp_regmap, 0x306C, 0x00000F00);
1410	regmap_write(priv->xfi_pextp_regmap, 0xA060, 0x00040000);
1411	regmap_write(priv->xfi_pextp_regmap, 0x90D0, 0x00000001);
1412	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200E800);
1413	udelay(150);
1414	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200C111);
1415	ndelay(1020);
1416	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200C101);
1417	udelay(15);
1418	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0202C111);
1419	ndelay(1020);
1420	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0202C101);
1421	udelay(100);
1422	regmap_write(priv->xfi_pextp_regmap, 0x30B0, 0x00000030);
1423	regmap_write(priv->xfi_pextp_regmap, 0x00F4, 0x80201F00);
1424	regmap_write(priv->xfi_pextp_regmap, 0x3040, 0x30000000);
1425	udelay(400);
1426}
1427
1428static void mtk_usxgmii_an_init(struct mtk_eth_priv *priv)
1429{
1430	mtk_xfi_pll_enable(priv);
1431	mtk_usxgmii_reset(priv);
1432	mtk_usxgmii_setup_phya_an_10000(priv);
1433}
1434
1435static void mtk_mac_init(struct mtk_eth_priv *priv)
1436{
1437	int i, ge_mode = 0;
1438	u32 mcr;
1439
1440	switch (priv->phy_interface) {
1441	case PHY_INTERFACE_MODE_RGMII_RXID:
1442	case PHY_INTERFACE_MODE_RGMII:
1443		ge_mode = GE_MODE_RGMII;
1444		break;
1445	case PHY_INTERFACE_MODE_SGMII:
1446	case PHY_INTERFACE_MODE_2500BASEX:
1447		if (MTK_HAS_CAPS(priv->soc->caps, MTK_GMAC2_U3_QPHY)) {
1448			mtk_infra_rmw(priv, USB_PHY_SWITCH_REG, QPHY_SEL_MASK,
1449				      SGMII_QPHY_SEL);
1450		}
1451
1452		ge_mode = GE_MODE_RGMII;
1453		mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG, SYSCFG0_SGMII_SEL_M,
1454			       SYSCFG0_SGMII_SEL(priv->gmac_id));
1455		if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
1456			mtk_sgmii_an_init(priv);
1457		else
1458			mtk_sgmii_force_init(priv);
1459		break;
1460	case PHY_INTERFACE_MODE_MII:
1461	case PHY_INTERFACE_MODE_GMII:
1462		ge_mode = GE_MODE_MII;
1463		break;
1464	case PHY_INTERFACE_MODE_RMII:
1465		ge_mode = GE_MODE_RMII;
1466		break;
1467	default:
1468		break;
1469	}
1470
1471	/* set the gmac to the right mode */
1472	mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
1473		       SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
1474		       ge_mode << SYSCFG0_GE_MODE_S(priv->gmac_id));
1475
1476	if (priv->force_mode) {
1477		mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
1478		      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
1479		      MAC_MODE | FORCE_MODE |
1480		      MAC_TX_EN | MAC_RX_EN |
1481		      BKOFF_EN | BACKPR_EN |
1482		      FORCE_LINK;
1483
1484		switch (priv->speed) {
1485		case SPEED_10:
1486			mcr |= SPEED_10M << FORCE_SPD_S;
1487			break;
1488		case SPEED_100:
1489			mcr |= SPEED_100M << FORCE_SPD_S;
1490			break;
1491		case SPEED_1000:
1492		case SPEED_2500:
1493			mcr |= SPEED_1000M << FORCE_SPD_S;
1494			break;
1495		}
1496
1497		if (priv->duplex)
1498			mcr |= FORCE_DPX;
1499
1500		mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
1501	}
1502
1503	if (MTK_HAS_CAPS(priv->soc->caps, MTK_GMAC1_TRGMII) &&
1504	    !MTK_HAS_CAPS(priv->soc->caps, MTK_TRGMII_MT7621_CLK)) {
1505		/* Lower Tx Driving for TRGMII path */
1506		for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
1507			mtk_gmac_write(priv, GMAC_TRGMII_TD_ODT(i),
1508				       (8 << TD_DM_DRVP_S) |
1509				       (8 << TD_DM_DRVN_S));
1510
1511		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, 0,
1512			     RX_RST | RXC_DQSISEL);
1513		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, RX_RST, 0);
1514	}
1515}
1516
1517static void mtk_xmac_init(struct mtk_eth_priv *priv)
1518{
1519	u32 force_link = 0;
1520
1521	switch (priv->phy_interface) {
1522	case PHY_INTERFACE_MODE_USXGMII:
1523		mtk_usxgmii_an_init(priv);
1524		break;
1525	default:
1526		break;
1527	}
1528
1529	/* Set GMAC to the correct mode */
1530	mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
1531		       SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
1532		       0);
1533
1534	if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII &&
1535	    priv->gmac_id == 1) {
1536		mtk_infra_rmw(priv, TOPMISC_NETSYS_PCS_MUX,
1537			      NETSYS_PCS_MUX_MASK, MUX_G2_USXGMII_SEL);
1538	}
1539
1540	if (priv->phy_interface == PHY_INTERFACE_MODE_XGMII ||
1541	    priv->gmac_id == 2)
1542		force_link = XGMAC_FORCE_LINK(priv->gmac_id);
1543
1544	mtk_gmac_rmw(priv, XGMAC_STS(priv->gmac_id),
1545		     XGMAC_FORCE_LINK(priv->gmac_id), force_link);
1546
1547	/* Force GMAC link down */
1548	mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), FORCE_MODE);
1549}
1550
1551static void mtk_eth_fifo_init(struct mtk_eth_priv *priv)
1552{
1553	char *pkt_base = priv->pkt_pool;
1554	struct mtk_tx_dma_v2 *txd;
1555	struct mtk_rx_dma_v2 *rxd;
1556	int i;
1557
1558	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0xffff0000, 0);
1559	udelay(500);
1560
1561	memset(priv->tx_ring_noc, 0, NUM_TX_DESC * priv->soc->txd_size);
1562	memset(priv->rx_ring_noc, 0, NUM_RX_DESC * priv->soc->rxd_size);
1563	memset(priv->pkt_pool, 0xff, TOTAL_PKT_BUF_SIZE);
1564
1565	flush_dcache_range((ulong)pkt_base,
1566			   (ulong)(pkt_base + TOTAL_PKT_BUF_SIZE));
1567
1568	priv->rx_dma_owner_idx0 = 0;
1569	priv->tx_cpu_owner_idx0 = 0;
1570
1571	for (i = 0; i < NUM_TX_DESC; i++) {
1572		txd = priv->tx_ring_noc + i * priv->soc->txd_size;
1573
1574		txd->txd1 = virt_to_phys(pkt_base);
1575		txd->txd2 = PDMA_TXD2_DDONE | PDMA_TXD2_LS0;
1576
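		/*
		 * Select the GDMA forward port for this descriptor: GDM1/GDM2
		 * are ports 1/2, and on NETSYS v3 the third GMAC (gmac_id 2)
		 * uses forward port 15.
		 */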
1577		if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
1578			txd->txd5 = PDMA_V2_TXD5_FPORT_SET(priv->gmac_id == 2 ?
1579							   15 : priv->gmac_id + 1);
1580		else if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2))
1581			txd->txd5 = PDMA_V2_TXD5_FPORT_SET(priv->gmac_id + 1);
1582		else
1583			txd->txd4 = PDMA_V1_TXD4_FPORT_SET(priv->gmac_id + 1);
1584
1585		pkt_base += PKTSIZE_ALIGN;
1586	}
1587
1588	for (i = 0; i < NUM_RX_DESC; i++) {
1589		rxd = priv->rx_ring_noc + i * priv->soc->rxd_size;
1590
1591		rxd->rxd1 = virt_to_phys(pkt_base);
1592
1593		if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1594		    MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
1595			rxd->rxd2 = PDMA_V2_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
1596		else
1597			rxd->rxd2 = PDMA_V1_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
1598
1599		pkt_base += PKTSIZE_ALIGN;
1600	}
1601
1602	mtk_pdma_write(priv, TX_BASE_PTR_REG(0),
1603		       virt_to_phys(priv->tx_ring_noc));
1604	mtk_pdma_write(priv, TX_MAX_CNT_REG(0), NUM_TX_DESC);
1605	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);
1606
1607	mtk_pdma_write(priv, RX_BASE_PTR_REG(0),
1608		       virt_to_phys(priv->rx_ring_noc));
1609	mtk_pdma_write(priv, RX_MAX_CNT_REG(0), NUM_RX_DESC);
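	/*
	 * Point the CPU Rx index at the last descriptor so that the DMA
	 * engine initially owns the entire Rx ring.
	 */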
1610	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), NUM_RX_DESC - 1);
1611
1612	mtk_pdma_write(priv, PDMA_RST_IDX_REG, RST_DTX_IDX0 | RST_DRX_IDX0);
1613}
1614
1615static void mtk_eth_mdc_init(struct mtk_eth_priv *priv)
1616{
1617	u32 divider;
1618
1619	if (priv->mdc == 0)
1620		return;
1621
1622	divider = min_t(u32, DIV_ROUND_UP(MDC_MAX_FREQ, priv->mdc), MDC_MAX_DIVIDER);
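	/*
	 * The resulting MDC frequency is MDC_MAX_FREQ / divider, i.e. the
	 * highest divided clock that does not exceed the requested rate,
	 * with the divider capped at MDC_MAX_DIVIDER.
	 */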
1623
1624	/* Configure MDC turbo mode */
1625	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
1626		mtk_gmac_rmw(priv, GMAC_MAC_MISC_REG, 0, MISC_MDC_TURBO);
1627	else
1628		mtk_gmac_rmw(priv, GMAC_PPSC_REG, 0, MISC_MDC_TURBO);
1629
1630	/* Configure MDC divider */
1631	mtk_gmac_rmw(priv, GMAC_PPSC_REG, PHY_MDC_CFG,
1632		     FIELD_PREP(PHY_MDC_CFG, divider));
1633}
1634
1635static int mtk_eth_start(struct udevice *dev)
1636{
1637	struct mtk_eth_priv *priv = dev_get_priv(dev);
1638	int i, ret;
1639
1640	/* Reset FE */
1641	reset_assert(&priv->rst_fe);
1642	udelay(1000);
1643	reset_deassert(&priv->rst_fe);
1644	mdelay(10);
1645
1646	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1647	    MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
1648		setbits_le32(priv->fe_base + FE_GLO_MISC_REG, PDMA_VER_V2);
1649
1650	/* Packets forward to PDMA */
1651	mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG, GDMA_FWD_TO_CPU);
1652
1653	for (i = 0; i < priv->soc->gdma_count; i++) {
1654		if (i == priv->gmac_id)
1655			continue;
1656
1657		mtk_gdma_write(priv, i, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);
1658	}
1659
1660	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3)) {
1661		if (priv->sw == SW_MT7988 && priv->gmac_id == 0) {
1662			mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG,
1663				       GDMA_BRIDGE_TO_CPU);
1664		}
1665
1666		mtk_gdma_write(priv, priv->gmac_id, GDMA_EG_CTRL_REG,
1667			       GDMA_CPU_BRIDGE_EN);
1668	}
1669
1670	udelay(500);
1671
1672	mtk_eth_fifo_init(priv);
1673
1674	if (priv->switch_mac_control)
1675		priv->switch_mac_control(priv, true);
1676
1677	/* Start PHY */
1678	if (priv->sw == SW_NONE) {
1679		ret = mtk_phy_start(priv);
1680		if (ret)
1681			return ret;
1682	}
1683
1684	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0,
1685		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
1686	udelay(500);
1687
1688	return 0;
1689}
1690
1691static void mtk_eth_stop(struct udevice *dev)
1692{
1693	struct mtk_eth_priv *priv = dev_get_priv(dev);
1694
1695	if (priv->switch_mac_control)
1696		priv->switch_mac_control(priv, false);
1697
1698	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG,
1699		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN, 0);
1700	udelay(500);
1701
1702	wait_for_bit_le32(priv->fe_base + priv->soc->pdma_base + PDMA_GLO_CFG_REG,
1703			  RX_DMA_BUSY | TX_DMA_BUSY, 0, 5000, 0);
1704}
1705
1706static int mtk_eth_write_hwaddr(struct udevice *dev)
1707{
1708	struct eth_pdata *pdata = dev_get_plat(dev);
1709	struct mtk_eth_priv *priv = dev_get_priv(dev);
1710	unsigned char *mac = pdata->enetaddr;
1711	u32 macaddr_lsb, macaddr_msb;
1712
1713	macaddr_msb = ((u32)mac[0] << 8) | (u32)mac[1];
1714	macaddr_lsb = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
1715		      ((u32)mac[4] << 8) | (u32)mac[5];
1716
1717	mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_MSB_REG, macaddr_msb);
1718	mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_LSB_REG, macaddr_lsb);
1719
1720	return 0;
1721}
1722
1723static int mtk_eth_send(struct udevice *dev, void *packet, int length)
1724{
1725	struct mtk_eth_priv *priv = dev_get_priv(dev);
1726	u32 idx = priv->tx_cpu_owner_idx0;
1727	struct mtk_tx_dma_v2 *txd;
1728	void *pkt_base;
1729
1730	txd = priv->tx_ring_noc + idx * priv->soc->txd_size;
1731
1732	if (!(txd->txd2 & PDMA_TXD2_DDONE)) {
1733		debug("mtk-eth: TX DMA descriptor ring is full\n");
1734		return -EPERM;
1735	}
1736
1737	pkt_base = (void *)phys_to_virt(txd->txd1);
1738	memcpy(pkt_base, packet, length);
1739	flush_dcache_range((ulong)pkt_base, (ulong)pkt_base +
1740			   roundup(length, ARCH_DMA_MINALIGN));
1741
1742	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1743	    MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
1744		txd->txd2 = PDMA_TXD2_LS0 | PDMA_V2_TXD2_SDL0_SET(length);
1745	else
1746		txd->txd2 = PDMA_TXD2_LS0 | PDMA_V1_TXD2_SDL0_SET(length);
1747
1748	priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
1749	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);
1750
1751	return 0;
1752}
1753
1754static int mtk_eth_recv(struct udevice *dev, int flags, uchar **packetp)
1755{
1756	struct mtk_eth_priv *priv = dev_get_priv(dev);
1757	u32 idx = priv->rx_dma_owner_idx0;
1758	struct mtk_rx_dma_v2 *rxd;
1759	uchar *pkt_base;
1760	u32 length;
1761
1762	rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;
1763
1764	if (!(rxd->rxd2 & PDMA_RXD2_DDONE)) {
1765		debug("mtk-eth: RX DMA descriptor ring is empty\n");
1766		return -EAGAIN;
1767	}
1768
1769	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1770	    MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
1771		length = PDMA_V2_RXD2_PLEN0_GET(rxd->rxd2);
1772	else
1773		length = PDMA_V1_RXD2_PLEN0_GET(rxd->rxd2);
1774
1775	pkt_base = (void *)phys_to_virt(rxd->rxd1);
1776	invalidate_dcache_range((ulong)pkt_base, (ulong)pkt_base +
1777				roundup(length, ARCH_DMA_MINALIGN));
1778
1779	if (packetp)
1780		*packetp = pkt_base;
1781
1782	return length;
1783}
1784
1785static int mtk_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
1786{
1787	struct mtk_eth_priv *priv = dev_get_priv(dev);
1788	u32 idx = priv->rx_dma_owner_idx0;
1789	struct mtk_rx_dma_v2 *rxd;
1790
1791	rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;
1792
1793	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1794	    MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
1795		rxd->rxd2 = PDMA_V2_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
1796	else
1797		rxd->rxd2 = PDMA_V1_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
1798
1799	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), idx);
1800	priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;
1801
1802	return 0;
1803}
1804
1805static int mtk_eth_probe(struct udevice *dev)
1806{
1807	struct eth_pdata *pdata = dev_get_plat(dev);
1808	struct mtk_eth_priv *priv = dev_get_priv(dev);
1809	ulong iobase = pdata->iobase;
1810	int ret;
1811
1812	/* Frame Engine Register Base */
1813	priv->fe_base = (void *)iobase;
1814
1815	/* GMAC Register Base */
1816	priv->gmac_base = (void *)(iobase + GMAC_BASE);
1817
1818	/* MDIO register */
1819	ret = mtk_mdio_register(dev);
1820	if (ret)
1821		return ret;
1822
1823	/* Prepare for tx/rx rings */
1824	priv->tx_ring_noc = (void *)
1825		noncached_alloc(priv->soc->txd_size * NUM_TX_DESC,
1826				ARCH_DMA_MINALIGN);
1827	priv->rx_ring_noc = (void *)
1828		noncached_alloc(priv->soc->rxd_size * NUM_RX_DESC,
1829				ARCH_DMA_MINALIGN);
1830
1831	/* Set MDC divider */
1832	mtk_eth_mdc_init(priv);
1833
1834	/* Set MAC mode */
1835	if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII ||
1836	    priv->phy_interface == PHY_INTERFACE_MODE_XGMII)
1837		mtk_xmac_init(priv);
1838	else
1839		mtk_mac_init(priv);
1840
1841	/* Probe phy if switch is not specified */
1842	if (priv->sw == SW_NONE)
1843		return mtk_phy_probe(dev);
1844
1845	/* Initialize switch */
1846	return mt753x_switch_init(priv);
1847}
1848
1849static int mtk_eth_remove(struct udevice *dev)
1850{
1851	struct mtk_eth_priv *priv = dev_get_priv(dev);
1852
1853	/* MDIO unregister */
1854	mdio_unregister(priv->mdio_bus);
1855	mdio_free(priv->mdio_bus);
1856
1857	/* Stop possibly started DMA */
1858	mtk_eth_stop(dev);
1859
1860	return 0;
1861}
1862
static int mtk_eth_of_to_plat(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct ofnode_phandle_args args;
	struct regmap *regmap;
	const char *str;
	ofnode subnode;
	int ret;

	priv->soc = (const struct mtk_soc_data *)dev_get_driver_data(dev);
	if (!priv->soc) {
		dev_err(dev, "missing soc compatible data\n");
		return -EINVAL;
	}

	pdata->iobase = (phys_addr_t)dev_remap_addr(dev);

	/* get corresponding ethsys phandle */
	ret = dev_read_phandle_with_args(dev, "mediatek,ethsys", NULL, 0, 0,
					 &args);
	if (ret)
		return ret;

	priv->ethsys_regmap = syscon_node_to_regmap(args.node);
	if (IS_ERR(priv->ethsys_regmap))
		return PTR_ERR(priv->ethsys_regmap);

	if (MTK_HAS_CAPS(priv->soc->caps, MTK_INFRA)) {
		/* get corresponding infracfg phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,infracfg",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		priv->infra_regmap = syscon_node_to_regmap(args.node);
		if (IS_ERR(priv->infra_regmap))
			return PTR_ERR(priv->infra_regmap);
	}

	/* Reset controllers */
	ret = reset_get_by_name(dev, "fe", &priv->rst_fe);
	if (ret) {
		printf("error: Unable to get reset ctrl for frame engine\n");
		return ret;
	}

	priv->gmac_id = dev_read_u32_default(dev, "mediatek,gmac-id", 0);

	priv->mdc = 0;
	subnode = ofnode_find_subnode(dev_ofnode(dev), "mdio");
	if (ofnode_valid(subnode)) {
		priv->mdc = ofnode_read_u32_default(subnode, "clock-frequency",
						    2500000);
		if (priv->mdc > MDC_MAX_FREQ ||
		    priv->mdc < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
			printf("error: MDIO clock frequency out of range\n");
			return -EINVAL;
		}
	}

	/* Interface mode is required */
	pdata->phy_interface = dev_read_phy_mode(dev);
	priv->phy_interface = pdata->phy_interface;
	if (pdata->phy_interface == PHY_INTERFACE_MODE_NA) {
		printf("error: phy-mode is not set\n");
		return -EINVAL;
	}

	/* Force mode or autoneg */
	subnode = ofnode_find_subnode(dev_ofnode(dev), "fixed-link");
	if (ofnode_valid(subnode)) {
		priv->force_mode = 1;
		priv->speed = ofnode_read_u32_default(subnode, "speed", 0);
		priv->duplex = ofnode_read_bool(subnode, "full-duplex");

		if (priv->speed != SPEED_10 && priv->speed != SPEED_100 &&
		    priv->speed != SPEED_1000 && priv->speed != SPEED_2500 &&
		    priv->speed != SPEED_10000) {
			printf("error: no valid speed set in fixed-link\n");
			return -EINVAL;
		}
	}

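	/*
	 * SGMII/2500Base-X and USXGMII links need additional control blocks
	 * (PCS/SerDes, PLL, reset); look up the matching syscon phandles.
	 */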
	if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    priv->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
		/* get corresponding sgmii phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,sgmiisys",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		regmap = syscon_node_to_regmap(args.node);
		if (IS_ERR(regmap))
			return PTR_ERR(regmap);

		priv->sgmii_base = regmap_get_range(regmap, 0);
		if (!priv->sgmii_base) {
			dev_err(dev, "Unable to find sgmii\n");
			return -ENODEV;
		}

		priv->pn_swap = ofnode_read_bool(args.node, "pn_swap");
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII) {
		/* get corresponding usxgmii phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,usxgmiisys",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		priv->usxgmii_regmap = syscon_node_to_regmap(args.node);
		if (IS_ERR(priv->usxgmii_regmap))
			return PTR_ERR(priv->usxgmii_regmap);

		/* get corresponding xfi_pextp phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,xfi_pextp",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		priv->xfi_pextp_regmap = syscon_node_to_regmap(args.node);
		if (IS_ERR(priv->xfi_pextp_regmap))
			return PTR_ERR(priv->xfi_pextp_regmap);

		/* get corresponding xfi_pll phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,xfi_pll",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		priv->xfi_pll_regmap = syscon_node_to_regmap(args.node);
		if (IS_ERR(priv->xfi_pll_regmap))
			return PTR_ERR(priv->xfi_pll_regmap);

		/* get corresponding toprgu phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,toprgu",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		priv->toprgu_regmap = syscon_node_to_regmap(args.node);
		if (IS_ERR(priv->toprgu_regmap))
			return PTR_ERR(priv->toprgu_regmap);
	}

	/* check for switch first, otherwise phy will be used */
	priv->sw = SW_NONE;
	priv->switch_init = NULL;
	priv->switch_mac_control = NULL;
	str = dev_read_string(dev, "mediatek,switch");

	if (str) {
		if (!strcmp(str, "mt7530")) {
			priv->sw = SW_MT7530;
			priv->switch_init = mt7530_setup;
			priv->switch_mac_control = mt7530_mac_control;
			priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
			priv->mt753x_reset_wait_time = 1000;
		} else if (!strcmp(str, "mt7531")) {
			priv->sw = SW_MT7531;
			priv->switch_init = mt7531_setup;
			priv->switch_mac_control = mt7531_mac_control;
			priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
			priv->mt753x_reset_wait_time = 200;
		} else if (!strcmp(str, "mt7988")) {
			priv->sw = SW_MT7988;
			priv->switch_init = mt7988_setup;
			priv->switch_mac_control = mt7988_mac_control;
			priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
			priv->mt753x_reset_wait_time = 50;
		} else {
			printf("error: unsupported switch\n");
			return -EINVAL;
		}

		priv->mcm = dev_read_bool(dev, "mediatek,mcm");
		if (priv->mcm) {
			ret = reset_get_by_name(dev, "mcm", &priv->rst_mcm);
			if (ret) {
				printf("error: no reset ctrl for mcm\n");
				return ret;
			}
		} else {
			gpio_request_by_name(dev, "reset-gpios", 0,
					     &priv->rst_gpio, GPIOD_IS_OUT);
		}
	} else {
		ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0,
						 0, &args);
		if (ret) {
			printf("error: phy-handle is not specified\n");
			return ret;
		}

		priv->phy_addr = ofnode_read_s32_default(args.node, "reg", -1);
		if (priv->phy_addr < 0) {
			printf("error: phy address is not specified\n");
			return -EINVAL;
		}
	}

	return 0;
}

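/* Per-SoC match data, referenced via .data in the compatible table below */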
static const struct mtk_soc_data mt7988_data = {
	.caps = MT7988_CAPS,
	.ana_rgc3 = 0x128,
	.gdma_count = 3,
	.pdma_base = PDMA_V3_BASE,
	.txd_size = sizeof(struct mtk_tx_dma_v2),
	.rxd_size = sizeof(struct mtk_rx_dma_v2),
};

static const struct mtk_soc_data mt7986_data = {
	.caps = MT7986_CAPS,
	.ana_rgc3 = 0x128,
	.gdma_count = 2,
	.pdma_base = PDMA_V2_BASE,
	.txd_size = sizeof(struct mtk_tx_dma_v2),
	.rxd_size = sizeof(struct mtk_rx_dma_v2),
};

static const struct mtk_soc_data mt7981_data = {
	.caps = MT7981_CAPS,
	.ana_rgc3 = 0x128,
	.gdma_count = 2,
	.pdma_base = PDMA_V2_BASE,
	.txd_size = sizeof(struct mtk_tx_dma_v2),
	.rxd_size = sizeof(struct mtk_rx_dma_v2),
};

static const struct mtk_soc_data mt7629_data = {
	.ana_rgc3 = 0x128,
	.gdma_count = 2,
	.pdma_base = PDMA_V1_BASE,
	.txd_size = sizeof(struct mtk_tx_dma),
	.rxd_size = sizeof(struct mtk_rx_dma),
};

static const struct mtk_soc_data mt7623_data = {
	.caps = MT7623_CAPS,
	.gdma_count = 2,
	.pdma_base = PDMA_V1_BASE,
	.txd_size = sizeof(struct mtk_tx_dma),
	.rxd_size = sizeof(struct mtk_rx_dma),
};

static const struct mtk_soc_data mt7622_data = {
	.ana_rgc3 = 0x2028,
	.gdma_count = 2,
	.pdma_base = PDMA_V1_BASE,
	.txd_size = sizeof(struct mtk_tx_dma),
	.rxd_size = sizeof(struct mtk_rx_dma),
};

static const struct mtk_soc_data mt7621_data = {
	.caps = MT7621_CAPS,
	.gdma_count = 2,
	.pdma_base = PDMA_V1_BASE,
	.txd_size = sizeof(struct mtk_tx_dma),
	.rxd_size = sizeof(struct mtk_rx_dma),
};

static const struct udevice_id mtk_eth_ids[] = {
	{ .compatible = "mediatek,mt7988-eth", .data = (ulong)&mt7988_data },
	{ .compatible = "mediatek,mt7986-eth", .data = (ulong)&mt7986_data },
	{ .compatible = "mediatek,mt7981-eth", .data = (ulong)&mt7981_data },
	{ .compatible = "mediatek,mt7629-eth", .data = (ulong)&mt7629_data },
	{ .compatible = "mediatek,mt7623-eth", .data = (ulong)&mt7623_data },
	{ .compatible = "mediatek,mt7622-eth", .data = (ulong)&mt7622_data },
	{ .compatible = "mediatek,mt7621-eth", .data = (ulong)&mt7621_data },
	{}
};

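/* DM ethernet operations implemented by this driver */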
static const struct eth_ops mtk_eth_ops = {
	.start = mtk_eth_start,
	.stop = mtk_eth_stop,
	.send = mtk_eth_send,
	.recv = mtk_eth_recv,
	.free_pkt = mtk_eth_free_pkt,
	.write_hwaddr = mtk_eth_write_hwaddr,
};

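/* DM_FLAG_ALLOC_PRIV_DMA ensures the private data is DMA-aligned */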
U_BOOT_DRIVER(mtk_eth) = {
	.name = "mtk-eth",
	.id = UCLASS_ETH,
	.of_match = mtk_eth_ids,
	.of_to_plat = mtk_eth_of_to_plat,
	.plat_auto = sizeof(struct eth_pdata),
	.probe = mtk_eth_probe,
	.remove = mtk_eth_remove,
	.ops = &mtk_eth_ops,
	.priv_auto = sizeof(struct mtk_eth_priv),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};