1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2018 MediaTek Inc.
4 *
5 * Author: Weijie Gao <weijie.gao@mediatek.com>
6 * Author: Mark Lee <mark-mc.lee@mediatek.com>
7 */
8
9#include <common.h>
10#include <cpu_func.h>
11#include <dm.h>
12#include <log.h>
13#include <malloc.h>
14#include <miiphy.h>
15#include <net.h>
16#include <regmap.h>
17#include <reset.h>
18#include <syscon.h>
19#include <wait_bit.h>
20#include <asm/cache.h>
21#include <asm/gpio.h>
22#include <asm/io.h>
23#include <dm/device_compat.h>
24#include <linux/delay.h>
25#include <linux/err.h>
26#include <linux/ioport.h>
27#include <linux/mdio.h>
28#include <linux/mii.h>
29#include <linux/printk.h>
30
31#include "mtk_eth.h"
32
33#define NUM_TX_DESC		24
34#define NUM_RX_DESC		24
35#define TX_TOTAL_BUF_SIZE	(NUM_TX_DESC * PKTSIZE_ALIGN)
36#define RX_TOTAL_BUF_SIZE	(NUM_RX_DESC * PKTSIZE_ALIGN)
37#define TOTAL_PKT_BUF_SIZE	(TX_TOTAL_BUF_SIZE + RX_TOTAL_BUF_SIZE)
38
39#define MT753X_NUM_PHYS		5
40#define MT753X_NUM_PORTS	7
41#define MT753X_DFL_SMI_ADDR	31
42#define MT753X_SMI_ADDR_MASK	0x1f
43
44#define MT753X_PHY_ADDR(base, addr) \
45	(((base) + (addr)) & 0x1f)
46
47#define GDMA_FWD_TO_CPU \
48	(0x20000000 | \
49	GDM_ICS_EN | \
50	GDM_TCS_EN | \
51	GDM_UCS_EN | \
52	STRP_CRC | \
53	(DP_PDMA << MYMAC_DP_S) | \
54	(DP_PDMA << BC_DP_S) | \
55	(DP_PDMA << MC_DP_S) | \
56	(DP_PDMA << UN_DP_S))
57
58#define GDMA_BRIDGE_TO_CPU \
59	(0xC0000000 | \
60	GDM_ICS_EN | \
61	GDM_TCS_EN | \
62	GDM_UCS_EN | \
63	(DP_PDMA << MYMAC_DP_S) | \
64	(DP_PDMA << BC_DP_S) | \
65	(DP_PDMA << MC_DP_S) | \
66	(DP_PDMA << UN_DP_S))
67
68#define GDMA_FWD_DISCARD \
69	(0x20000000 | \
70	GDM_ICS_EN | \
71	GDM_TCS_EN | \
72	GDM_UCS_EN | \
73	STRP_CRC | \
74	(DP_DISCARD << MYMAC_DP_S) | \
75	(DP_DISCARD << BC_DP_S) | \
76	(DP_DISCARD << MC_DP_S) | \
77	(DP_DISCARD << UN_DP_S))
78
79enum mtk_switch {
80	SW_NONE,
81	SW_MT7530,
82	SW_MT7531,
83	SW_MT7988,
84};
85
/* struct mtk_soc_data -	This is the structure holding all differences
 *				among various platforms
 * @caps:			Flags showing the extra capabilities of the SoC
89 * @ana_rgc3:			The offset for register ANA_RGC3 related to
90 *				sgmiisys syscon
91 * @gdma_count:			Number of GDMAs
92 * @pdma_base:			Register base of PDMA block
93 * @txd_size:			Tx DMA descriptor size.
94 * @rxd_size:			Rx DMA descriptor size.
95 */
96struct mtk_soc_data {
97	u32 caps;
98	u32 ana_rgc3;
99	u32 gdma_count;
100	u32 pdma_base;
101	u32 txd_size;
102	u32 rxd_size;
103};
104
105struct mtk_eth_priv {
106	char pkt_pool[TOTAL_PKT_BUF_SIZE] __aligned(ARCH_DMA_MINALIGN);
107
108	void *tx_ring_noc;
109	void *rx_ring_noc;
110
111	int rx_dma_owner_idx0;
112	int tx_cpu_owner_idx0;
113
114	void __iomem *fe_base;
115	void __iomem *gmac_base;
116	void __iomem *sgmii_base;
117	void __iomem *gsw_base;
118
119	struct regmap *ethsys_regmap;
120
121	struct regmap *infra_regmap;
122
123	struct regmap *usxgmii_regmap;
124	struct regmap *xfi_pextp_regmap;
125	struct regmap *xfi_pll_regmap;
126	struct regmap *toprgu_regmap;
127
128	struct mii_dev *mdio_bus;
129	int (*mii_read)(struct mtk_eth_priv *priv, u8 phy, u8 reg);
130	int (*mii_write)(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 val);
131	int (*mmd_read)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg);
132	int (*mmd_write)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg,
133			 u16 val);
134
135	const struct mtk_soc_data *soc;
136	int gmac_id;
137	int force_mode;
138	int speed;
139	int duplex;
140	int mdc;
141	bool pn_swap;
142
143	struct phy_device *phydev;
144	int phy_interface;
145	int phy_addr;
146
147	enum mtk_switch sw;
148	int (*switch_init)(struct mtk_eth_priv *priv);
149	void (*switch_mac_control)(struct mtk_eth_priv *priv, bool enable);
150	u32 mt753x_smi_addr;
151	u32 mt753x_phy_base;
152	u32 mt753x_pmcr;
153	u32 mt753x_reset_wait_time;
154
155	struct gpio_desc rst_gpio;
156	int mcm;
157
158	struct reset_ctl rst_fe;
159	struct reset_ctl rst_mcm;
160};
161
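/*
 * Register accessor helpers: PDMA and GDMA registers live inside the frame
 * engine block (fe_base), GMAC registers in the GMAC block (gmac_base), and
 * the ethsys/infracfg registers are reached through their syscon regmaps.
 */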
162static void mtk_pdma_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
163{
164	writel(val, priv->fe_base + priv->soc->pdma_base + reg);
165}
166
167static void mtk_pdma_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
168			 u32 set)
169{
170	clrsetbits_le32(priv->fe_base + priv->soc->pdma_base + reg, clr, set);
171}
172
173static void mtk_gdma_write(struct mtk_eth_priv *priv, int no, u32 reg,
174			   u32 val)
175{
176	u32 gdma_base;
177
178	if (no == 2)
179		gdma_base = GDMA3_BASE;
180	else if (no == 1)
181		gdma_base = GDMA2_BASE;
182	else
183		gdma_base = GDMA1_BASE;
184
185	writel(val, priv->fe_base + gdma_base + reg);
186}
187
188static void mtk_fe_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
189{
190	clrsetbits_le32(priv->fe_base + reg, clr, set);
191}
192
193static u32 mtk_gmac_read(struct mtk_eth_priv *priv, u32 reg)
194{
195	return readl(priv->gmac_base + reg);
196}
197
198static void mtk_gmac_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
199{
200	writel(val, priv->gmac_base + reg);
201}
202
203static void mtk_gmac_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
204{
205	clrsetbits_le32(priv->gmac_base + reg, clr, set);
206}
207
208static void mtk_ethsys_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
209			   u32 set)
210{
211	uint val;
212
213	regmap_read(priv->ethsys_regmap, reg, &val);
214	val &= ~clr;
215	val |= set;
216	regmap_write(priv->ethsys_regmap, reg, val);
217}
218
219static void mtk_infra_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
220			  u32 set)
221{
222	uint val;
223
224	regmap_read(priv->infra_regmap, reg, &val);
225	val &= ~clr;
226	val |= set;
227	regmap_write(priv->infra_regmap, reg, val);
228}
229
230static u32 mtk_gsw_read(struct mtk_eth_priv *priv, u32 reg)
231{
232	return readl(priv->gsw_base + reg);
233}
234
235static void mtk_gsw_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
236{
237	writel(val, priv->gsw_base + reg);
238}
239
240/* Direct MDIO clause 22/45 access via SoC */
241static int mtk_mii_rw(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data,
242		      u32 cmd, u32 st)
243{
244	int ret;
245	u32 val;
246
247	val = (st << MDIO_ST_S) |
248	      ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
249	      (((u32)phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
250	      (((u32)reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);
251
252	if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
253		val |= data & MDIO_RW_DATA_M;
254
255	mtk_gmac_write(priv, GMAC_PIAC_REG, val | PHY_ACS_ST);
256
257	ret = wait_for_bit_le32(priv->gmac_base + GMAC_PIAC_REG,
258				PHY_ACS_ST, 0, 5000, 0);
259	if (ret) {
260		pr_warn("MDIO access timeout\n");
261		return ret;
262	}
263
264	if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
265		val = mtk_gmac_read(priv, GMAC_PIAC_REG);
266		return val & MDIO_RW_DATA_M;
267	}
268
269	return 0;
270}
271
272/* Direct MDIO clause 22 read via SoC */
273static int mtk_mii_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
274{
275	return mtk_mii_rw(priv, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22);
276}
277
278/* Direct MDIO clause 22 write via SoC */
279static int mtk_mii_write(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data)
280{
281	return mtk_mii_rw(priv, phy, reg, data, MDIO_CMD_WRITE, MDIO_ST_C22);
282}
283
284/* Direct MDIO clause 45 read via SoC */
285static int mtk_mmd_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
286{
287	int ret;
288
289	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
290	if (ret)
291		return ret;
292
293	return mtk_mii_rw(priv, addr, devad, 0, MDIO_CMD_READ_C45,
294			  MDIO_ST_C45);
295}
296
297/* Direct MDIO clause 45 write via SoC */
298static int mtk_mmd_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
299			 u16 reg, u16 val)
300{
301	int ret;
302
303	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
304	if (ret)
305		return ret;
306
307	return mtk_mii_rw(priv, addr, devad, val, MDIO_CMD_WRITE,
308			  MDIO_ST_C45);
309}
310
311/* Indirect MDIO clause 45 read via MII registers */
312static int mtk_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
313			    u16 reg)
314{
315	int ret;
316
317	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
318			      (MMD_ADDR << MMD_CMD_S) |
319			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
320	if (ret)
321		return ret;
322
323	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
324	if (ret)
325		return ret;
326
327	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
328			      (MMD_DATA << MMD_CMD_S) |
329			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
330	if (ret)
331		return ret;
332
333	return priv->mii_read(priv, addr, MII_MMD_ADDR_DATA_REG);
334}
335
336/* Indirect MDIO clause 45 write via MII registers */
337static int mtk_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
338			     u16 reg, u16 val)
339{
340	int ret;
341
342	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
343			      (MMD_ADDR << MMD_CMD_S) |
344			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
345	if (ret)
346		return ret;
347
348	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
349	if (ret)
350		return ret;
351
352	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
353			      (MMD_DATA << MMD_CMD_S) |
354			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
355	if (ret)
356		return ret;
357
358	return priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, val);
359}
360
361/*
362 * MT7530 Internal Register Address Bits
363 * -------------------------------------------------------------------
364 * | 15  14  13  12  11  10   9   8   7   6 | 5   4   3   2 | 1   0  |
365 * |----------------------------------------|---------------|--------|
366 * |              Page Address              |  Reg Address  | Unused |
367 * -------------------------------------------------------------------
368 */
369
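/*
 * A 32-bit switch register is accessed over MDIO in three steps: a page
 * select write to register 0x1f, then 16-bit accesses to the low word
 * (register bits [5:2]) and the high word (register 0x10). On MT7988 the
 * switch is memory-mapped and accessed directly instead.
 */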
370static int mt753x_reg_read(struct mtk_eth_priv *priv, u32 reg, u32 *data)
371{
372	int ret, low_word, high_word;
373
374	if (priv->sw == SW_MT7988) {
375		*data = mtk_gsw_read(priv, reg);
376		return 0;
377	}
378
379	/* Write page address */
380	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
381	if (ret)
382		return ret;
383
384	/* Read low word */
385	low_word = mtk_mii_read(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf);
386	if (low_word < 0)
387		return low_word;
388
389	/* Read high word */
390	high_word = mtk_mii_read(priv, priv->mt753x_smi_addr, 0x10);
391	if (high_word < 0)
392		return high_word;
393
394	if (data)
395		*data = ((u32)high_word << 16) | (low_word & 0xffff);
396
397	return 0;
398}
399
400static int mt753x_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 data)
401{
402	int ret;
403
404	if (priv->sw == SW_MT7988) {
405		mtk_gsw_write(priv, reg, data);
406		return 0;
407	}
408
409	/* Write page address */
410	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
411	if (ret)
412		return ret;
413
414	/* Write low word */
415	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf,
416			    data & 0xffff);
417	if (ret)
418		return ret;
419
420	/* Write high word */
421	return mtk_mii_write(priv, priv->mt753x_smi_addr, 0x10, data >> 16);
422}
423
424static void mt753x_reg_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
425			   u32 set)
426{
427	u32 val;
428
429	mt753x_reg_read(priv, reg, &val);
430	val &= ~clr;
431	val |= set;
432	mt753x_reg_write(priv, reg, val);
433}
434
435/* Indirect MDIO clause 22/45 access */
436static int mt7531_mii_rw(struct mtk_eth_priv *priv, int phy, int reg, u16 data,
437			 u32 cmd, u32 st)
438{
439	ulong timeout;
440	u32 val, timeout_ms;
441	int ret = 0;
442
443	val = (st << MDIO_ST_S) |
444	      ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
445	      ((phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
446	      ((reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);
447
448	if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
449		val |= data & MDIO_RW_DATA_M;
450
451	mt753x_reg_write(priv, MT7531_PHY_IAC, val | PHY_ACS_ST);
452
453	timeout_ms = 100;
454	timeout = get_timer(0);
455	while (1) {
456		mt753x_reg_read(priv, MT7531_PHY_IAC, &val);
457
458		if ((val & PHY_ACS_ST) == 0)
459			break;
460
461		if (get_timer(timeout) > timeout_ms)
462			return -ETIMEDOUT;
463	}
464
465	if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
466		mt753x_reg_read(priv, MT7531_PHY_IAC, &val);
467		ret = val & MDIO_RW_DATA_M;
468	}
469
470	return ret;
471}
472
473static int mt7531_mii_ind_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
474{
475	u8 phy_addr;
476
477	if (phy >= MT753X_NUM_PHYS)
478		return -EINVAL;
479
480	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);
481
482	return mt7531_mii_rw(priv, phy_addr, reg, 0, MDIO_CMD_READ,
483			     MDIO_ST_C22);
484}
485
486static int mt7531_mii_ind_write(struct mtk_eth_priv *priv, u8 phy, u8 reg,
487				u16 val)
488{
489	u8 phy_addr;
490
491	if (phy >= MT753X_NUM_PHYS)
492		return -EINVAL;
493
494	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);
495
496	return mt7531_mii_rw(priv, phy_addr, reg, val, MDIO_CMD_WRITE,
497			     MDIO_ST_C22);
498}
499
500static int mt7531_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
501			       u16 reg)
502{
503	u8 phy_addr;
504	int ret;
505
506	if (addr >= MT753X_NUM_PHYS)
507		return -EINVAL;
508
509	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);
510
511	ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
512			    MDIO_ST_C45);
513	if (ret)
514		return ret;
515
516	return mt7531_mii_rw(priv, phy_addr, devad, 0, MDIO_CMD_READ_C45,
517			     MDIO_ST_C45);
518}
519
520static int mt7531_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
521				u16 reg, u16 val)
522{
523	u8 phy_addr;
524	int ret;
525
526	if (addr >= MT753X_NUM_PHYS)
527		return 0;
528
529	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);
530
531	ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
532			    MDIO_ST_C45);
533	if (ret)
534		return ret;
535
536	return mt7531_mii_rw(priv, phy_addr, devad, val, MDIO_CMD_WRITE,
537			     MDIO_ST_C45);
538}
539
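/* Present the SoC/switch specific MDIO accessors as a standard MII bus */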
540static int mtk_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
541{
542	struct mtk_eth_priv *priv = bus->priv;
543
544	if (devad < 0)
545		return priv->mii_read(priv, addr, reg);
546	else
547		return priv->mmd_read(priv, addr, devad, reg);
548}
549
550static int mtk_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
551			  u16 val)
552{
553	struct mtk_eth_priv *priv = bus->priv;
554
555	if (devad < 0)
556		return priv->mii_write(priv, addr, reg, val);
557	else
558		return priv->mmd_write(priv, addr, devad, reg, val);
559}
560
561static int mtk_mdio_register(struct udevice *dev)
562{
563	struct mtk_eth_priv *priv = dev_get_priv(dev);
564	struct mii_dev *mdio_bus = mdio_alloc();
565	int ret;
566
567	if (!mdio_bus)
568		return -ENOMEM;
569
570	/* Assign MDIO access APIs according to the switch/phy */
571	switch (priv->sw) {
572	case SW_MT7530:
573		priv->mii_read = mtk_mii_read;
574		priv->mii_write = mtk_mii_write;
575		priv->mmd_read = mtk_mmd_ind_read;
576		priv->mmd_write = mtk_mmd_ind_write;
577		break;
578	case SW_MT7531:
579	case SW_MT7988:
580		priv->mii_read = mt7531_mii_ind_read;
581		priv->mii_write = mt7531_mii_ind_write;
582		priv->mmd_read = mt7531_mmd_ind_read;
583		priv->mmd_write = mt7531_mmd_ind_write;
584		break;
585	default:
586		priv->mii_read = mtk_mii_read;
587		priv->mii_write = mtk_mii_write;
588		priv->mmd_read = mtk_mmd_read;
589		priv->mmd_write = mtk_mmd_write;
590	}
591
592	mdio_bus->read = mtk_mdio_read;
593	mdio_bus->write = mtk_mdio_write;
	snprintf(mdio_bus->name, sizeof(mdio_bus->name), "%s", dev->name);
595
596	mdio_bus->priv = (void *)priv;
597
598	ret = mdio_register(mdio_bus);
599
600	if (ret)
601		return ret;
602
603	priv->mdio_bus = mdio_bus;
604
605	return 0;
606}
607
608static int mt753x_core_reg_read(struct mtk_eth_priv *priv, u32 reg)
609{
610	u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);
611
612	return priv->mmd_read(priv, phy_addr, 0x1f, reg);
613}
614
615static void mt753x_core_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
616{
617	u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);
618
619	priv->mmd_write(priv, phy_addr, 0x1f, reg, val);
620}
621
622static int mt7530_pad_clk_setup(struct mtk_eth_priv *priv, int mode)
623{
624	u32 ncpo1, ssc_delta;
625
626	switch (mode) {
627	case PHY_INTERFACE_MODE_RGMII:
628		ncpo1 = 0x0c80;
629		ssc_delta = 0x87;
630		break;
631	default:
632		printf("error: xMII mode %d not supported\n", mode);
633		return -EINVAL;
634	}
635
636	/* Disable MT7530 core clock */
637	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, 0);
638
639	/* Disable MT7530 PLL */
640	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
641			      (2 << RG_GSWPLL_POSDIV_200M_S) |
642			      (32 << RG_GSWPLL_FBKDIV_200M_S));
643
	/* For MT7530 core clock = 500MHz */
645	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP2,
646			      (1 << RG_GSWPLL_POSDIV_500M_S) |
647			      (25 << RG_GSWPLL_FBKDIV_500M_S));
648
649	/* Enable MT7530 PLL */
650	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
651			      (2 << RG_GSWPLL_POSDIV_200M_S) |
652			      (32 << RG_GSWPLL_FBKDIV_200M_S) |
653			      RG_GSWPLL_EN_PRE);
654
655	udelay(20);
656
657	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
658
659	/* Setup the MT7530 TRGMII Tx Clock */
660	mt753x_core_reg_write(priv, CORE_PLL_GROUP5, ncpo1);
661	mt753x_core_reg_write(priv, CORE_PLL_GROUP6, 0);
662	mt753x_core_reg_write(priv, CORE_PLL_GROUP10, ssc_delta);
663	mt753x_core_reg_write(priv, CORE_PLL_GROUP11, ssc_delta);
664	mt753x_core_reg_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
665			      RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);
666
667	mt753x_core_reg_write(priv, CORE_PLL_GROUP2,
668			      RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
669			      (1 << RG_SYSPLL_POSDIV_S));
670
671	mt753x_core_reg_write(priv, CORE_PLL_GROUP7,
672			      RG_LCDDS_PCW_NCPO_CHG | (3 << RG_LCCDS_C_S) |
673			      RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
674
675	/* Enable MT7530 core clock */
676	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG,
677			      REG_GSWCK_EN | REG_TRGMIICK_EN);
678
679	return 0;
680}
681
682static void mt7530_mac_control(struct mtk_eth_priv *priv, bool enable)
683{
684	u32 pmcr = FORCE_MODE;
685
686	if (enable)
687		pmcr = priv->mt753x_pmcr;
688
689	mt753x_reg_write(priv, PMCR_REG(6), pmcr);
690}
691
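/*
 * Basic MT7530 initialization: take control of the internal PHYs, reset the
 * switch core, force port 6 to 1000M full-duplex RGMII towards the SoC and
 * set up the core PLL and TRGMII clocks.
 */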
692static int mt7530_setup(struct mtk_eth_priv *priv)
693{
694	u16 phy_addr, phy_val;
695	u32 val, txdrv;
696	int i;
697
698	if (!MTK_HAS_CAPS(priv->soc->caps, MTK_TRGMII_MT7621_CLK)) {
699		/* Select 250MHz clk for RGMII mode */
700		mtk_ethsys_rmw(priv, ETHSYS_CLKCFG0_REG,
701			       ETHSYS_TRGMII_CLK_SEL362_5, 0);
702
703		txdrv = 8;
704	} else {
705		txdrv = 4;
706	}
707
708	/* Modify HWTRAP first to allow direct access to internal PHYs */
709	mt753x_reg_read(priv, HWTRAP_REG, &val);
710	val |= CHG_TRAP;
711	val &= ~C_MDIO_BPS;
712	mt753x_reg_write(priv, MHWTRAP_REG, val);
713
714	/* Calculate the phy base address */
715	val = ((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3;
716	priv->mt753x_phy_base = (val | 0x7) + 1;
717
718	/* Turn off PHYs */
719	for (i = 0; i < MT753X_NUM_PHYS; i++) {
720		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
721		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
722		phy_val |= BMCR_PDOWN;
723		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
724	}
725
726	/* Force MAC link down before reset */
727	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);
728	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE);
729
730	/* MT7530 reset */
731	mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
732	udelay(100);
733
734	val = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
735	      MAC_MODE | FORCE_MODE |
736	      MAC_TX_EN | MAC_RX_EN |
737	      BKOFF_EN | BACKPR_EN |
738	      (SPEED_1000M << FORCE_SPD_S) |
739	      FORCE_DPX | FORCE_LINK;
740
741	/* MT7530 Port6: Forced 1000M/FD, FC disabled */
742	priv->mt753x_pmcr = val;
743
744	/* MT7530 Port5: Forced link down */
745	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);
746
747	/* Keep MAC link down before starting eth */
748	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE);
749
750	/* MT7530 Port6: Set to RGMII */
751	mt753x_reg_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_M, P6_INTF_MODE_RGMII);
752
753	/* Hardware Trap: Enable Port6, Disable Port5 */
754	mt753x_reg_read(priv, HWTRAP_REG, &val);
755	val |= CHG_TRAP | LOOPDET_DIS | P5_INTF_DIS |
756	       (P5_INTF_SEL_GMAC5 << P5_INTF_SEL_S) |
757	       (P5_INTF_MODE_RGMII << P5_INTF_MODE_S);
758	val &= ~(C_MDIO_BPS | P6_INTF_DIS);
759	mt753x_reg_write(priv, MHWTRAP_REG, val);
760
761	/* Setup switch core pll */
762	mt7530_pad_clk_setup(priv, priv->phy_interface);
763
764	/* Lower Tx Driving for TRGMII path */
765	for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
766		mt753x_reg_write(priv, MT7530_TRGMII_TD_ODT(i),
767				 (txdrv << TD_DM_DRVP_S) |
768				 (txdrv << TD_DM_DRVN_S));
769
770	for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
771		mt753x_reg_rmw(priv, MT7530_TRGMII_RD(i), RD_TAP_M, 16);
772
773	/* Turn on PHYs */
774	for (i = 0; i < MT753X_NUM_PHYS; i++) {
775		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
776		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
777		phy_val &= ~BMCR_PDOWN;
778		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
779	}
780
781	return 0;
782}
783
784static void mt7531_core_pll_setup(struct mtk_eth_priv *priv, int mcm)
785{
786	/* Step 1 : Disable MT7531 COREPLL */
787	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, 0);
788
789	/* Step 2: switch to XTAL output */
790	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_CLKSW, SW_CLKSW);
791
792	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, 0);
793
794	/* Step 3: disable PLLGP and enable program PLLGP */
795	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_PLLGP, SW_PLLGP);
796
797	/* Step 4: program COREPLL output frequency to 500MHz */
798	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_POSDIV_M,
799		       2 << RG_COREPLL_POSDIV_S);
800	udelay(25);
801
	/* Currently, only a 25MHz XTAL is supported */
803	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_M,
804		       0x140000 << RG_COREPLL_SDM_PCW_S);
805
806	/* Set feedback divide ratio update signal to high */
807	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG,
808		       RG_COREPLL_SDM_PCW_CHG);
809
810	/* Wait for at least 16 XTAL clocks */
811	udelay(10);
812
813	/* Step 5: set feedback divide ratio update signal to low */
814	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG, 0);
815
	/* Enable 325MHz clock for SGMII */
	mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);

	/* Enable 250MHz SSC clock for RGMII */
	mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);

	/* Step 6: Enable MT7531 PLL */
823	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, RG_COREPLL_EN);
824
825	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, EN_COREPLL);
826
827	udelay(25);
828}
829
830static int mt7531_port_sgmii_init(struct mtk_eth_priv *priv,
831				  u32 port)
832{
833	if (port != 5 && port != 6) {
834		printf("mt7531: port %d is not a SGMII port\n", port);
835		return -EINVAL;
836	}
837
838	/* Set SGMII GEN2 speed(2.5G) */
839	mt753x_reg_rmw(priv, MT7531_PHYA_CTRL_SIGNAL3(port),
840		       SGMSYS_SPEED_2500, SGMSYS_SPEED_2500);
841
842	/* Disable SGMII AN */
843	mt753x_reg_rmw(priv, MT7531_PCS_CONTROL_1(port),
844		       SGMII_AN_ENABLE, 0);
845
846	/* SGMII force mode setting */
847	mt753x_reg_write(priv, MT7531_SGMII_MODE(port), SGMII_FORCE_MODE);
848
849	/* Release PHYA power down state */
850	mt753x_reg_rmw(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
851		       SGMII_PHYA_PWD, 0);
852
853	return 0;
854}
855
856static int mt7531_port_rgmii_init(struct mtk_eth_priv *priv, u32 port)
857{
858	u32 val;
859
860	if (port != 5) {
861		printf("error: RGMII mode is not available for port %d\n",
862		       port);
863		return -EINVAL;
864	}
865
866	mt753x_reg_read(priv, MT7531_CLKGEN_CTRL, &val);
867	val |= GP_CLK_EN;
868	val &= ~GP_MODE_M;
869	val |= GP_MODE_RGMII << GP_MODE_S;
870	val |= TXCLK_NO_REVERSE;
871	val |= RXCLK_NO_DELAY;
872	val &= ~CLK_SKEW_IN_M;
873	val |= CLK_SKEW_IN_NO_CHANGE << CLK_SKEW_IN_S;
874	val &= ~CLK_SKEW_OUT_M;
875	val |= CLK_SKEW_OUT_NO_CHANGE << CLK_SKEW_OUT_S;
876	mt753x_reg_write(priv, MT7531_CLKGEN_CTRL, val);
877
878	return 0;
879}
880
881static void mt7531_phy_setting(struct mtk_eth_priv *priv)
882{
883	int i;
884	u32 val;
885
886	for (i = 0; i < MT753X_NUM_PHYS; i++) {
887		/* Enable HW auto downshift */
888		priv->mii_write(priv, i, 0x1f, 0x1);
889		val = priv->mii_read(priv, i, PHY_EXT_REG_14);
890		val |= PHY_EN_DOWN_SHFIT;
891		priv->mii_write(priv, i, PHY_EXT_REG_14, val);
892
893		/* PHY link down power saving enable */
894		val = priv->mii_read(priv, i, PHY_EXT_REG_17);
895		val |= PHY_LINKDOWN_POWER_SAVING_EN;
896		priv->mii_write(priv, i, PHY_EXT_REG_17, val);
897
898		val = priv->mmd_read(priv, i, 0x1e, PHY_DEV1E_REG_0C6);
899		val &= ~PHY_POWER_SAVING_M;
900		val |= PHY_POWER_SAVING_TX << PHY_POWER_SAVING_S;
901		priv->mmd_write(priv, i, 0x1e, PHY_DEV1E_REG_0C6, val);
902	}
903}
904
905static void mt7531_mac_control(struct mtk_eth_priv *priv, bool enable)
906{
907	u32 pmcr = FORCE_MODE_LNK;
908
909	if (enable)
910		pmcr = priv->mt753x_pmcr;
911
912	mt753x_reg_write(priv, PMCR_REG(5), pmcr);
913	mt753x_reg_write(priv, PMCR_REG(6), pmcr);
914}
915
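/*
 * MT7531 initialization: reset the switch, set up the core PLL, configure
 * port 5/6 for RGMII or SGMII (2500Base-X) according to the pad strapping
 * and the selected phy-mode, then bring the internal PHYs back up.
 */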
916static int mt7531_setup(struct mtk_eth_priv *priv)
917{
918	u16 phy_addr, phy_val;
919	u32 val;
920	u32 pmcr;
921	u32 port5_sgmii;
922	int i;
923
924	priv->mt753x_phy_base = (priv->mt753x_smi_addr + 1) &
925				MT753X_SMI_ADDR_MASK;
926
927	/* Turn off PHYs */
928	for (i = 0; i < MT753X_NUM_PHYS; i++) {
929		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
930		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
931		phy_val |= BMCR_PDOWN;
932		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
933	}
934
935	/* Force MAC link down before reset */
936	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE_LNK);
937	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);
938
939	/* Switch soft reset */
940	mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
941	udelay(100);
942
943	/* Enable MDC input Schmitt Trigger */
944	mt753x_reg_rmw(priv, MT7531_SMT0_IOLB, SMT_IOLB_5_SMI_MDC_EN,
945		       SMT_IOLB_5_SMI_MDC_EN);
946
947	mt7531_core_pll_setup(priv, priv->mcm);
948
949	mt753x_reg_read(priv, MT7531_TOP_SIG_SR, &val);
950	port5_sgmii = !!(val & PAD_DUAL_SGMII_EN);
951
	/* Port 5 supports either RGMII or SGMII, while port 6 supports SGMII only. */
953	switch (priv->phy_interface) {
954	case PHY_INTERFACE_MODE_RGMII:
955		if (!port5_sgmii)
956			mt7531_port_rgmii_init(priv, 5);
957		break;
958	case PHY_INTERFACE_MODE_2500BASEX:
959		mt7531_port_sgmii_init(priv, 6);
960		if (port5_sgmii)
961			mt7531_port_sgmii_init(priv, 5);
962		break;
963	default:
964		break;
965	}
966
967	pmcr = MT7531_FORCE_MODE |
968	       (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
969	       MAC_MODE | MAC_TX_EN | MAC_RX_EN |
970	       BKOFF_EN | BACKPR_EN |
971	       FORCE_RX_FC | FORCE_TX_FC |
972	       (SPEED_1000M << FORCE_SPD_S) | FORCE_DPX |
973	       FORCE_LINK;
974
975	priv->mt753x_pmcr = pmcr;
976
977	/* Keep MAC link down before starting eth */
978	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE_LNK);
979	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);
980
981	/* Turn on PHYs */
982	for (i = 0; i < MT753X_NUM_PHYS; i++) {
983		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
984		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
985		phy_val &= ~BMCR_PDOWN;
986		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
987	}
988
989	mt7531_phy_setting(priv);
990
991	/* Enable Internal PHYs */
992	val = mt753x_core_reg_read(priv, CORE_PLL_GROUP4);
993	val |= MT7531_BYPASS_MODE;
994	val &= ~MT7531_POWER_ON_OFF;
995	mt753x_core_reg_write(priv, CORE_PLL_GROUP4, val);
996
997	return 0;
998}
999
1000static void mt7988_phy_setting(struct mtk_eth_priv *priv)
1001{
1002	u16 val;
1003	u32 i;
1004
1005	for (i = 0; i < MT753X_NUM_PHYS; i++) {
1006		/* Enable HW auto downshift */
1007		priv->mii_write(priv, i, 0x1f, 0x1);
1008		val = priv->mii_read(priv, i, PHY_EXT_REG_14);
1009		val |= PHY_EN_DOWN_SHFIT;
1010		priv->mii_write(priv, i, PHY_EXT_REG_14, val);
1011
1012		/* PHY link down power saving enable */
1013		val = priv->mii_read(priv, i, PHY_EXT_REG_17);
1014		val |= PHY_LINKDOWN_POWER_SAVING_EN;
1015		priv->mii_write(priv, i, PHY_EXT_REG_17, val);
1016	}
1017}
1018
1019static void mt7988_mac_control(struct mtk_eth_priv *priv, bool enable)
1020{
1021	u32 pmcr = FORCE_MODE_LNK;
1022
1023	if (enable)
1024		pmcr = priv->mt753x_pmcr;
1025
1026	mt753x_reg_write(priv, PMCR_REG(6), pmcr);
1027}
1028
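/*
 * MT7988 built-in switch: the GSW registers are memory-mapped, and the CPU
 * port is connected to GMAC0 through an internal CPU bridge instead of the
 * actual USXGMII path.
 */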
1029static int mt7988_setup(struct mtk_eth_priv *priv)
1030{
1031	u16 phy_addr, phy_val;
1032	u32 pmcr;
1033	int i;
1034
1035	priv->gsw_base = regmap_get_range(priv->ethsys_regmap, 0) + GSW_BASE;
1036
1037	priv->mt753x_phy_base = (priv->mt753x_smi_addr + 1) &
1038				MT753X_SMI_ADDR_MASK;
1039
1040	/* Turn off PHYs */
1041	for (i = 0; i < MT753X_NUM_PHYS; i++) {
1042		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
1043		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
1044		phy_val |= BMCR_PDOWN;
1045		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
1046	}
1047
1048	switch (priv->phy_interface) {
1049	case PHY_INTERFACE_MODE_USXGMII:
1050		/* Use CPU bridge instead of actual USXGMII path */
1051
1052		/* Set GDM1 no drop */
1053		mtk_fe_rmw(priv, PSE_NO_DROP_CFG_REG, 0, PSE_NO_DROP_GDM1);
1054
1055		/* Enable GDM1 to GSW CPU bridge */
1056		mtk_gmac_rmw(priv, GMAC_MAC_MISC_REG, 0, BIT(0));
1057
1058		/* XGMAC force link up */
1059		mtk_gmac_rmw(priv, GMAC_XGMAC_STS_REG, 0, P1_XGMAC_FORCE_LINK);
1060
1061		/* Setup GSW CPU bridge IPG */
1062		mtk_gmac_rmw(priv, GMAC_GSW_CFG_REG, GSWTX_IPG_M | GSWRX_IPG_M,
1063			     (0xB << GSWTX_IPG_S) | (0xB << GSWRX_IPG_S));
1064		break;
1065	default:
1066		printf("Error: MT7988 GSW does not support %s interface\n",
1067		       phy_string_for_interface(priv->phy_interface));
1068		break;
1069	}
1070
1071	pmcr = MT7988_FORCE_MODE |
1072	       (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
1073	       MAC_MODE | MAC_TX_EN | MAC_RX_EN |
1074	       BKOFF_EN | BACKPR_EN |
1075	       FORCE_RX_FC | FORCE_TX_FC |
1076	       (SPEED_1000M << FORCE_SPD_S) | FORCE_DPX |
1077	       FORCE_LINK;
1078
1079	priv->mt753x_pmcr = pmcr;
1080
1081	/* Keep MAC link down before starting eth */
1082	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);
1083
1084	/* Turn on PHYs */
1085	for (i = 0; i < MT753X_NUM_PHYS; i++) {
1086		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
1087		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
1088		phy_val &= ~BMCR_PDOWN;
1089		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
1090	}
1091
1092	mt7988_phy_setting(priv);
1093
1094	return 0;
1095}
1096
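/*
 * Common switch bring-up: hard-reset the switch (via the MCM reset control
 * or the reset GPIO), run the model specific setup, then isolate the user
 * ports so that all traffic goes through CPU port 6 only.
 */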
1097static int mt753x_switch_init(struct mtk_eth_priv *priv)
1098{
1099	int ret;
1100	int i;
1101
1102	/* Global reset switch */
1103	if (priv->mcm) {
1104		reset_assert(&priv->rst_mcm);
1105		udelay(1000);
1106		reset_deassert(&priv->rst_mcm);
1107		mdelay(priv->mt753x_reset_wait_time);
1108	} else if (dm_gpio_is_valid(&priv->rst_gpio)) {
1109		dm_gpio_set_value(&priv->rst_gpio, 0);
1110		udelay(1000);
1111		dm_gpio_set_value(&priv->rst_gpio, 1);
1112		mdelay(priv->mt753x_reset_wait_time);
1113	}
1114
1115	ret = priv->switch_init(priv);
1116	if (ret)
1117		return ret;
1118
1119	/* Set port isolation */
1120	for (i = 0; i < MT753X_NUM_PORTS; i++) {
1121		/* Set port matrix mode */
1122		if (i != 6)
1123			mt753x_reg_write(priv, PCR_REG(i),
1124					 (0x40 << PORT_MATRIX_S));
1125		else
1126			mt753x_reg_write(priv, PCR_REG(i),
1127					 (0x3f << PORT_MATRIX_S));
1128
1129		/* Set port mode to user port */
1130		mt753x_reg_write(priv, PVC_REG(i),
1131				 (0x8100 << STAG_VPID_S) |
1132				 (VLAN_ATTR_USER << VLAN_ATTR_S));
1133	}
1134
1135	return 0;
1136}
1137
1138static void mtk_xphy_link_adjust(struct mtk_eth_priv *priv)
1139{
1140	u16 lcl_adv = 0, rmt_adv = 0;
1141	u8 flowctrl;
1142	u32 mcr;
1143
1144	mcr = mtk_gmac_read(priv, XGMAC_PORT_MCR(priv->gmac_id));
1145	mcr &= ~(XGMAC_FORCE_TX_FC | XGMAC_FORCE_RX_FC);
1146
1147	if (priv->phydev->duplex) {
1148		if (priv->phydev->pause)
1149			rmt_adv = LPA_PAUSE_CAP;
1150		if (priv->phydev->asym_pause)
1151			rmt_adv |= LPA_PAUSE_ASYM;
1152
1153		if (priv->phydev->advertising & ADVERTISED_Pause)
1154			lcl_adv |= ADVERTISE_PAUSE_CAP;
1155		if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
1156			lcl_adv |= ADVERTISE_PAUSE_ASYM;
1157
1158		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
1159
1160		if (flowctrl & FLOW_CTRL_TX)
1161			mcr |= XGMAC_FORCE_TX_FC;
1162		if (flowctrl & FLOW_CTRL_RX)
1163			mcr |= XGMAC_FORCE_RX_FC;
1164
1165		debug("rx pause %s, tx pause %s\n",
1166		      flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
1167		      flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
1168	}
1169
1170	mcr &= ~(XGMAC_TRX_DISABLE);
1171	mtk_gmac_write(priv, XGMAC_PORT_MCR(priv->gmac_id), mcr);
1172}
1173
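/*
 * Program the GMAC port MCR from the link parameters reported by the PHY,
 * resolving the pause/asym-pause advertisement into TX/RX flow control.
 */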
1174static void mtk_phy_link_adjust(struct mtk_eth_priv *priv)
1175{
1176	u16 lcl_adv = 0, rmt_adv = 0;
1177	u8 flowctrl;
1178	u32 mcr;
1179
1180	mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
1181	      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
1182	      MAC_MODE | FORCE_MODE |
1183	      MAC_TX_EN | MAC_RX_EN |
1184	      DEL_RXFIFO_CLR |
1185	      BKOFF_EN | BACKPR_EN;
1186
1187	switch (priv->phydev->speed) {
1188	case SPEED_10:
1189		mcr |= (SPEED_10M << FORCE_SPD_S);
1190		break;
1191	case SPEED_100:
1192		mcr |= (SPEED_100M << FORCE_SPD_S);
1193		break;
1194	case SPEED_1000:
1195	case SPEED_2500:
1196		mcr |= (SPEED_1000M << FORCE_SPD_S);
1197		break;
	}
1199
1200	if (priv->phydev->link)
1201		mcr |= FORCE_LINK;
1202
1203	if (priv->phydev->duplex) {
1204		mcr |= FORCE_DPX;
1205
1206		if (priv->phydev->pause)
1207			rmt_adv = LPA_PAUSE_CAP;
1208		if (priv->phydev->asym_pause)
1209			rmt_adv |= LPA_PAUSE_ASYM;
1210
1211		if (priv->phydev->advertising & ADVERTISED_Pause)
1212			lcl_adv |= ADVERTISE_PAUSE_CAP;
1213		if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
1214			lcl_adv |= ADVERTISE_PAUSE_ASYM;
1215
1216		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
1217
1218		if (flowctrl & FLOW_CTRL_TX)
1219			mcr |= FORCE_TX_FC;
1220		if (flowctrl & FLOW_CTRL_RX)
1221			mcr |= FORCE_RX_FC;
1222
1223		debug("rx pause %s, tx pause %s\n",
1224		      flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
1225		      flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
1226	}
1227
1228	mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
1229}
1230
1231static int mtk_phy_start(struct mtk_eth_priv *priv)
1232{
1233	struct phy_device *phydev = priv->phydev;
1234	int ret;
1235
1236	ret = phy_startup(phydev);
1237
1238	if (ret) {
1239		debug("Could not initialize PHY %s\n", phydev->dev->name);
1240		return ret;
1241	}
1242
1243	if (!phydev->link) {
1244		debug("%s: link down.\n", phydev->dev->name);
1245		return 0;
1246	}
1247
1248	if (!priv->force_mode) {
1249		if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII ||
1250		    priv->phy_interface == PHY_INTERFACE_MODE_XGMII)
1251			mtk_xphy_link_adjust(priv);
1252		else
1253			mtk_phy_link_adjust(priv);
1254	}
1255
1256	debug("Speed: %d, %s duplex%s\n", phydev->speed,
1257	      (phydev->duplex) ? "full" : "half",
1258	      (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");
1259
1260	return 0;
1261}
1262
1263static int mtk_phy_probe(struct udevice *dev)
1264{
1265	struct mtk_eth_priv *priv = dev_get_priv(dev);
1266	struct phy_device *phydev;
1267
1268	phydev = phy_connect(priv->mdio_bus, priv->phy_addr, dev,
1269			     priv->phy_interface);
1270	if (!phydev)
1271		return -ENODEV;
1272
1273	phydev->supported &= PHY_GBIT_FEATURES;
1274	phydev->advertising = phydev->supported;
1275
1276	priv->phydev = phydev;
1277	phy_config(phydev);
1278
1279	return 0;
1280}
1281
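/*
 * Configure the SGMII PCS for 1G with in-band autonegotiation; the fixed
 * 2.5G variant (2500base-x) is handled by mtk_sgmii_force_init() below.
 */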
1282static void mtk_sgmii_an_init(struct mtk_eth_priv *priv)
1283{
1284	/* Set SGMII GEN1 speed(1G) */
1285	clrsetbits_le32(priv->sgmii_base + priv->soc->ana_rgc3,
1286			SGMSYS_SPEED_2500, 0);
1287
1288	/* Enable SGMII AN */
1289	setbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
1290		     SGMII_AN_ENABLE);
1291
1292	/* SGMII AN mode setting */
1293	writel(SGMII_AN_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);
1294
1295	/* SGMII PN SWAP setting */
1296	if (priv->pn_swap) {
1297		setbits_le32(priv->sgmii_base + SGMSYS_QPHY_WRAP_CTRL,
1298			     SGMII_PN_SWAP_TX_RX);
1299	}
1300
1301	/* Release PHYA power down state */
1302	clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
1303			SGMII_PHYA_PWD, 0);
1304}
1305
1306static void mtk_sgmii_force_init(struct mtk_eth_priv *priv)
1307{
1308	/* Set SGMII GEN2 speed(2.5G) */
1309	setbits_le32(priv->sgmii_base + priv->soc->ana_rgc3,
1310		     SGMSYS_SPEED_2500);
1311
1312	/* Disable SGMII AN */
1313	clrsetbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
1314			SGMII_AN_ENABLE, 0);
1315
1316	/* SGMII force mode setting */
1317	writel(SGMII_FORCE_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);
1318
1319	/* SGMII PN SWAP setting */
1320	if (priv->pn_swap) {
1321		setbits_le32(priv->sgmii_base + SGMSYS_QPHY_WRAP_CTRL,
1322			     SGMII_PN_SWAP_TX_RX);
1323	}
1324
1325	/* Release PHYA power down state */
1326	clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
1327			SGMII_PHYA_PWD, 0);
1328}
1329
1330static void mtk_xfi_pll_enable(struct mtk_eth_priv *priv)
1331{
1332	u32 val = 0;
1333
1334	/* Add software workaround for USXGMII PLL TCL issue */
1335	regmap_write(priv->xfi_pll_regmap, XFI_PLL_ANA_GLB8,
1336		     RG_XFI_PLL_ANA_SWWA);
1337
1338	regmap_read(priv->xfi_pll_regmap, XFI_PLL_DIG_GLB8, &val);
1339	val |= RG_XFI_PLL_EN;
1340	regmap_write(priv->xfi_pll_regmap, XFI_PLL_DIG_GLB8, val);
1341}
1342
1343static void mtk_usxgmii_reset(struct mtk_eth_priv *priv)
1344{
1345	switch (priv->gmac_id) {
1346	case 1:
1347		regmap_write(priv->toprgu_regmap, 0xFC, 0x0000A004);
1348		regmap_write(priv->toprgu_regmap, 0x18, 0x88F0A004);
1349		regmap_write(priv->toprgu_regmap, 0xFC, 0x00000000);
1350		regmap_write(priv->toprgu_regmap, 0x18, 0x88F00000);
1351		regmap_write(priv->toprgu_regmap, 0x18, 0x00F00000);
1352		break;
1353	case 2:
1354		regmap_write(priv->toprgu_regmap, 0xFC, 0x00005002);
1355		regmap_write(priv->toprgu_regmap, 0x18, 0x88F05002);
1356		regmap_write(priv->toprgu_regmap, 0xFC, 0x00000000);
1357		regmap_write(priv->toprgu_regmap, 0x18, 0x88F00000);
1358		regmap_write(priv->toprgu_regmap, 0x18, 0x00F00000);
1359		break;
1360	}
1361
1362	mdelay(10);
1363}
1364
1365static void mtk_usxgmii_setup_phya_an_10000(struct mtk_eth_priv *priv)
1366{
1367	regmap_write(priv->usxgmii_regmap, 0x810, 0x000FFE6D);
1368	regmap_write(priv->usxgmii_regmap, 0x818, 0x07B1EC7B);
1369	regmap_write(priv->usxgmii_regmap, 0x80C, 0x30000000);
1370	ndelay(1020);
1371	regmap_write(priv->usxgmii_regmap, 0x80C, 0x10000000);
1372	ndelay(1020);
1373	regmap_write(priv->usxgmii_regmap, 0x80C, 0x00000000);
1374
1375	regmap_write(priv->xfi_pextp_regmap, 0x9024, 0x00C9071C);
1376	regmap_write(priv->xfi_pextp_regmap, 0x2020, 0xAA8585AA);
1377	regmap_write(priv->xfi_pextp_regmap, 0x2030, 0x0C020707);
1378	regmap_write(priv->xfi_pextp_regmap, 0x2034, 0x0E050F0F);
1379	regmap_write(priv->xfi_pextp_regmap, 0x2040, 0x00140032);
1380	regmap_write(priv->xfi_pextp_regmap, 0x50F0, 0x00C014AA);
1381	regmap_write(priv->xfi_pextp_regmap, 0x50E0, 0x3777C12B);
1382	regmap_write(priv->xfi_pextp_regmap, 0x506C, 0x005F9CFF);
1383	regmap_write(priv->xfi_pextp_regmap, 0x5070, 0x9D9DFAFA);
1384	regmap_write(priv->xfi_pextp_regmap, 0x5074, 0x27273F3F);
1385	regmap_write(priv->xfi_pextp_regmap, 0x5078, 0xA7883C68);
1386	regmap_write(priv->xfi_pextp_regmap, 0x507C, 0x11661166);
1387	regmap_write(priv->xfi_pextp_regmap, 0x5080, 0x0E000AAF);
1388	regmap_write(priv->xfi_pextp_regmap, 0x5084, 0x08080D0D);
1389	regmap_write(priv->xfi_pextp_regmap, 0x5088, 0x02030909);
1390	regmap_write(priv->xfi_pextp_regmap, 0x50E4, 0x0C0C0000);
1391	regmap_write(priv->xfi_pextp_regmap, 0x50E8, 0x04040000);
1392	regmap_write(priv->xfi_pextp_regmap, 0x50EC, 0x0F0F0C06);
1393	regmap_write(priv->xfi_pextp_regmap, 0x50A8, 0x506E8C8C);
1394	regmap_write(priv->xfi_pextp_regmap, 0x6004, 0x18190000);
1395	regmap_write(priv->xfi_pextp_regmap, 0x00F8, 0x01423342);
1396	regmap_write(priv->xfi_pextp_regmap, 0x00F4, 0x80201F20);
1397	regmap_write(priv->xfi_pextp_regmap, 0x0030, 0x00050C00);
1398	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x02002800);
1399	ndelay(1020);
1400	regmap_write(priv->xfi_pextp_regmap, 0x30B0, 0x00000020);
1401	regmap_write(priv->xfi_pextp_regmap, 0x3028, 0x00008A01);
1402	regmap_write(priv->xfi_pextp_regmap, 0x302C, 0x0000A884);
1403	regmap_write(priv->xfi_pextp_regmap, 0x3024, 0x00083002);
1404	regmap_write(priv->xfi_pextp_regmap, 0x3010, 0x00022220);
1405	regmap_write(priv->xfi_pextp_regmap, 0x5064, 0x0F020A01);
1406	regmap_write(priv->xfi_pextp_regmap, 0x50B4, 0x06100600);
1407	regmap_write(priv->xfi_pextp_regmap, 0x3048, 0x40704000);
1408	regmap_write(priv->xfi_pextp_regmap, 0x3050, 0xA8000000);
1409	regmap_write(priv->xfi_pextp_regmap, 0x3054, 0x000000AA);
1410	regmap_write(priv->xfi_pextp_regmap, 0x306C, 0x00000F00);
1411	regmap_write(priv->xfi_pextp_regmap, 0xA060, 0x00040000);
1412	regmap_write(priv->xfi_pextp_regmap, 0x90D0, 0x00000001);
1413	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200E800);
1414	udelay(150);
1415	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200C111);
1416	ndelay(1020);
1417	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200C101);
1418	udelay(15);
1419	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0202C111);
1420	ndelay(1020);
1421	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0202C101);
1422	udelay(100);
1423	regmap_write(priv->xfi_pextp_regmap, 0x30B0, 0x00000030);
1424	regmap_write(priv->xfi_pextp_regmap, 0x00F4, 0x80201F00);
1425	regmap_write(priv->xfi_pextp_regmap, 0x3040, 0x30000000);
1426	udelay(400);
1427}
1428
1429static void mtk_usxgmii_an_init(struct mtk_eth_priv *priv)
1430{
1431	mtk_xfi_pll_enable(priv);
1432	mtk_usxgmii_reset(priv);
1433	mtk_usxgmii_setup_phya_an_10000(priv);
1434}
1435
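/*
 * Select the xMII mode of the GMAC via the ethsys syscon and, for fixed-link
 * (force mode) setups, program the port MCR with the configured speed and
 * duplex.
 */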
1436static void mtk_mac_init(struct mtk_eth_priv *priv)
1437{
1438	int i, ge_mode = 0;
1439	u32 mcr;
1440
1441	switch (priv->phy_interface) {
1442	case PHY_INTERFACE_MODE_RGMII_RXID:
1443	case PHY_INTERFACE_MODE_RGMII:
1444		ge_mode = GE_MODE_RGMII;
1445		break;
1446	case PHY_INTERFACE_MODE_SGMII:
1447	case PHY_INTERFACE_MODE_2500BASEX:
1448		if (MTK_HAS_CAPS(priv->soc->caps, MTK_GMAC2_U3_QPHY)) {
1449			mtk_infra_rmw(priv, USB_PHY_SWITCH_REG, QPHY_SEL_MASK,
1450				      SGMII_QPHY_SEL);
1451		}
1452
1453		ge_mode = GE_MODE_RGMII;
1454		mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG, SYSCFG0_SGMII_SEL_M,
1455			       SYSCFG0_SGMII_SEL(priv->gmac_id));
1456		if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
1457			mtk_sgmii_an_init(priv);
1458		else
1459			mtk_sgmii_force_init(priv);
1460		break;
1461	case PHY_INTERFACE_MODE_MII:
1462	case PHY_INTERFACE_MODE_GMII:
1463		ge_mode = GE_MODE_MII;
1464		break;
1465	case PHY_INTERFACE_MODE_RMII:
1466		ge_mode = GE_MODE_RMII;
1467		break;
1468	default:
1469		break;
1470	}
1471
	/* Set the GMAC to the right mode */
1473	mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
1474		       SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
1475		       ge_mode << SYSCFG0_GE_MODE_S(priv->gmac_id));
1476
1477	if (priv->force_mode) {
1478		mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
1479		      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
1480		      MAC_MODE | FORCE_MODE |
1481		      MAC_TX_EN | MAC_RX_EN |
1482		      BKOFF_EN | BACKPR_EN |
1483		      FORCE_LINK;
1484
1485		switch (priv->speed) {
1486		case SPEED_10:
1487			mcr |= SPEED_10M << FORCE_SPD_S;
1488			break;
1489		case SPEED_100:
1490			mcr |= SPEED_100M << FORCE_SPD_S;
1491			break;
1492		case SPEED_1000:
1493		case SPEED_2500:
1494			mcr |= SPEED_1000M << FORCE_SPD_S;
1495			break;
1496		}
1497
1498		if (priv->duplex)
1499			mcr |= FORCE_DPX;
1500
1501		mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
1502	}
1503
1504	if (MTK_HAS_CAPS(priv->soc->caps, MTK_GMAC1_TRGMII) &&
1505	    !MTK_HAS_CAPS(priv->soc->caps, MTK_TRGMII_MT7621_CLK)) {
1506		/* Lower Tx Driving for TRGMII path */
1507		for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
1508			mtk_gmac_write(priv, GMAC_TRGMII_TD_ODT(i),
1509				       (8 << TD_DM_DRVP_S) |
1510				       (8 << TD_DM_DRVN_S));
1511
1512		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, 0,
1513			     RX_RST | RXC_DQSISEL);
1514		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, RX_RST, 0);
1515	}
1516}
1517
1518static void mtk_xmac_init(struct mtk_eth_priv *priv)
1519{
1520	u32 force_link = 0;
1521
1522	switch (priv->phy_interface) {
1523	case PHY_INTERFACE_MODE_USXGMII:
1524		mtk_usxgmii_an_init(priv);
1525		break;
1526	default:
1527		break;
1528	}
1529
1530	/* Set GMAC to the correct mode */
1531	mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
1532		       SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
1533		       0);
1534
1535	if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII &&
1536	    priv->gmac_id == 1) {
1537		mtk_infra_rmw(priv, TOPMISC_NETSYS_PCS_MUX,
1538			      NETSYS_PCS_MUX_MASK, MUX_G2_USXGMII_SEL);
1539	}
1540
1541	if (priv->phy_interface == PHY_INTERFACE_MODE_XGMII ||
1542	    priv->gmac_id == 2)
1543		force_link = XGMAC_FORCE_LINK(priv->gmac_id);
1544
1545	mtk_gmac_rmw(priv, XGMAC_STS(priv->gmac_id),
1546		     XGMAC_FORCE_LINK(priv->gmac_id), force_link);
1547
1548	/* Force GMAC link down */
1549	mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), FORCE_MODE);
1550}
1551
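/*
 * Set up the PDMA TX/RX descriptor rings. Every descriptor points at a fixed
 * PKTSIZE_ALIGN sized buffer inside pkt_pool; TX descriptors start out owned
 * by the CPU (DDONE set) and RX descriptors owned by the DMA engine.
 */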
1552static void mtk_eth_fifo_init(struct mtk_eth_priv *priv)
1553{
1554	char *pkt_base = priv->pkt_pool;
1555	struct mtk_tx_dma_v2 *txd;
1556	struct mtk_rx_dma_v2 *rxd;
1557	int i;
1558
1559	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0xffff0000, 0);
1560	udelay(500);
1561
1562	memset(priv->tx_ring_noc, 0, NUM_TX_DESC * priv->soc->txd_size);
1563	memset(priv->rx_ring_noc, 0, NUM_RX_DESC * priv->soc->rxd_size);
1564	memset(priv->pkt_pool, 0xff, TOTAL_PKT_BUF_SIZE);
1565
1566	flush_dcache_range((ulong)pkt_base,
1567			   (ulong)(pkt_base + TOTAL_PKT_BUF_SIZE));
1568
1569	priv->rx_dma_owner_idx0 = 0;
1570	priv->tx_cpu_owner_idx0 = 0;
1571
1572	for (i = 0; i < NUM_TX_DESC; i++) {
1573		txd = priv->tx_ring_noc + i * priv->soc->txd_size;
1574
1575		txd->txd1 = virt_to_phys(pkt_base);
1576		txd->txd2 = PDMA_TXD2_DDONE | PDMA_TXD2_LS0;
1577
1578		if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
1579			txd->txd5 = PDMA_V2_TXD5_FPORT_SET(priv->gmac_id == 2 ?
1580							   15 : priv->gmac_id + 1);
1581		else if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2))
1582			txd->txd5 = PDMA_V2_TXD5_FPORT_SET(priv->gmac_id + 1);
1583		else
1584			txd->txd4 = PDMA_V1_TXD4_FPORT_SET(priv->gmac_id + 1);
1585
1586		pkt_base += PKTSIZE_ALIGN;
1587	}
1588
1589	for (i = 0; i < NUM_RX_DESC; i++) {
1590		rxd = priv->rx_ring_noc + i * priv->soc->rxd_size;
1591
1592		rxd->rxd1 = virt_to_phys(pkt_base);
1593
1594		if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1595		    MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
1596			rxd->rxd2 = PDMA_V2_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
1597		else
1598			rxd->rxd2 = PDMA_V1_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
1599
1600		pkt_base += PKTSIZE_ALIGN;
1601	}
1602
1603	mtk_pdma_write(priv, TX_BASE_PTR_REG(0),
1604		       virt_to_phys(priv->tx_ring_noc));
1605	mtk_pdma_write(priv, TX_MAX_CNT_REG(0), NUM_TX_DESC);
1606	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);
1607
1608	mtk_pdma_write(priv, RX_BASE_PTR_REG(0),
1609		       virt_to_phys(priv->rx_ring_noc));
1610	mtk_pdma_write(priv, RX_MAX_CNT_REG(0), NUM_RX_DESC);
1611	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), NUM_RX_DESC - 1);
1612
1613	mtk_pdma_write(priv, PDMA_RST_IDX_REG, RST_DTX_IDX0 | RST_DRX_IDX0);
1614}
1615
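/*
 * Derive the MDC divider from the requested MDIO clock frequency and enable
 * MDC turbo mode. A frequency of 0 keeps the hardware default.
 */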
1616static void mtk_eth_mdc_init(struct mtk_eth_priv *priv)
1617{
1618	u32 divider;
1619
1620	if (priv->mdc == 0)
1621		return;
1622
1623	divider = min_t(u32, DIV_ROUND_UP(MDC_MAX_FREQ, priv->mdc), MDC_MAX_DIVIDER);
1624
1625	/* Configure MDC turbo mode */
1626	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
1627		mtk_gmac_rmw(priv, GMAC_MAC_MISC_REG, 0, MISC_MDC_TURBO);
1628	else
1629		mtk_gmac_rmw(priv, GMAC_PPSC_REG, 0, MISC_MDC_TURBO);
1630
1631	/* Configure MDC divider */
1632	mtk_gmac_rmw(priv, GMAC_PPSC_REG, PHY_MDC_CFG,
1633		     FIELD_PREP(PHY_MDC_CFG, divider));
1634}
1635
1636static int mtk_eth_start(struct udevice *dev)
1637{
1638	struct mtk_eth_priv *priv = dev_get_priv(dev);
1639	int i, ret;
1640
1641	/* Reset FE */
1642	reset_assert(&priv->rst_fe);
1643	udelay(1000);
1644	reset_deassert(&priv->rst_fe);
1645	mdelay(10);
1646
1647	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1648	    MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
1649		setbits_le32(priv->fe_base + FE_GLO_MISC_REG, PDMA_VER_V2);
1650
1651	/* Packets forward to PDMA */
1652	mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG, GDMA_FWD_TO_CPU);
1653
1654	for (i = 0; i < priv->soc->gdma_count; i++) {
1655		if (i == priv->gmac_id)
1656			continue;
1657
1658		mtk_gdma_write(priv, i, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);
1659	}
1660
1661	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3)) {
1662		if (priv->sw == SW_MT7988 && priv->gmac_id == 0) {
1663			mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG,
1664				       GDMA_BRIDGE_TO_CPU);
1665		}
1666
1667		mtk_gdma_write(priv, priv->gmac_id, GDMA_EG_CTRL_REG,
1668			       GDMA_CPU_BRIDGE_EN);
1669	}
1670
1671	udelay(500);
1672
1673	mtk_eth_fifo_init(priv);
1674
1675	if (priv->switch_mac_control)
1676		priv->switch_mac_control(priv, true);
1677
1678	/* Start PHY */
1679	if (priv->sw == SW_NONE) {
1680		ret = mtk_phy_start(priv);
1681		if (ret)
1682			return ret;
1683	}
1684
1685	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0,
1686		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
1687	udelay(500);
1688
1689	return 0;
1690}
1691
1692static void mtk_eth_stop(struct udevice *dev)
1693{
1694	struct mtk_eth_priv *priv = dev_get_priv(dev);
1695
1696	if (priv->switch_mac_control)
1697		priv->switch_mac_control(priv, false);
1698
1699	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG,
1700		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN, 0);
1701	udelay(500);
1702
1703	wait_for_bit_le32(priv->fe_base + priv->soc->pdma_base + PDMA_GLO_CFG_REG,
1704			  RX_DMA_BUSY | TX_DMA_BUSY, 0, 5000, 0);
1705}
1706
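/* Write the interface MAC address into the per-GDMA MAC_MSB/LSB registers */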
1707static int mtk_eth_write_hwaddr(struct udevice *dev)
1708{
1709	struct eth_pdata *pdata = dev_get_plat(dev);
1710	struct mtk_eth_priv *priv = dev_get_priv(dev);
1711	unsigned char *mac = pdata->enetaddr;
1712	u32 macaddr_lsb, macaddr_msb;
1713
1714	macaddr_msb = ((u32)mac[0] << 8) | (u32)mac[1];
1715	macaddr_lsb = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
1716		      ((u32)mac[4] << 8) | (u32)mac[5];
1717
1718	mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_MSB_REG, macaddr_msb);
1719	mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_LSB_REG, macaddr_lsb);
1720
1721	return 0;
1722}
1723
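/*
 * Copy the packet into the buffer of the next CPU-owned TX descriptor and
 * hand it over to the DMA engine by advancing TX_CTX_IDX.
 */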
1724static int mtk_eth_send(struct udevice *dev, void *packet, int length)
1725{
1726	struct mtk_eth_priv *priv = dev_get_priv(dev);
1727	u32 idx = priv->tx_cpu_owner_idx0;
1728	struct mtk_tx_dma_v2 *txd;
1729	void *pkt_base;
1730
1731	txd = priv->tx_ring_noc + idx * priv->soc->txd_size;
1732
1733	if (!(txd->txd2 & PDMA_TXD2_DDONE)) {
1734		debug("mtk-eth: TX DMA descriptor ring is full\n");
1735		return -EPERM;
1736	}
1737
1738	pkt_base = (void *)phys_to_virt(txd->txd1);
1739	memcpy(pkt_base, packet, length);
1740	flush_dcache_range((ulong)pkt_base, (ulong)pkt_base +
1741			   roundup(length, ARCH_DMA_MINALIGN));
1742
1743	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1744	    MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
1745		txd->txd2 = PDMA_TXD2_LS0 | PDMA_V2_TXD2_SDL0_SET(length);
1746	else
1747		txd->txd2 = PDMA_TXD2_LS0 | PDMA_V1_TXD2_SDL0_SET(length);
1748
1749	priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
1750	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);
1751
1752	return 0;
1753}
1754
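/*
 * Hand out the buffer of the next completed RX descriptor without copying;
 * the descriptor is recycled later in mtk_eth_free_pkt().
 */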
1755static int mtk_eth_recv(struct udevice *dev, int flags, uchar **packetp)
1756{
1757	struct mtk_eth_priv *priv = dev_get_priv(dev);
1758	u32 idx = priv->rx_dma_owner_idx0;
1759	struct mtk_rx_dma_v2 *rxd;
1760	uchar *pkt_base;
1761	u32 length;
1762
1763	rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;
1764
1765	if (!(rxd->rxd2 & PDMA_RXD2_DDONE)) {
1766		debug("mtk-eth: RX DMA descriptor ring is empty\n");
1767		return -EAGAIN;
1768	}
1769
1770	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1771	    MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
1772		length = PDMA_V2_RXD2_PLEN0_GET(rxd->rxd2);
1773	else
1774		length = PDMA_V1_RXD2_PLEN0_GET(rxd->rxd2);
1775
1776	pkt_base = (void *)phys_to_virt(rxd->rxd1);
1777	invalidate_dcache_range((ulong)pkt_base, (ulong)pkt_base +
1778				roundup(length, ARCH_DMA_MINALIGN));
1779
1780	if (packetp)
1781		*packetp = pkt_base;
1782
1783	return length;
1784}
1785
1786static int mtk_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
1787{
1788	struct mtk_eth_priv *priv = dev_get_priv(dev);
1789	u32 idx = priv->rx_dma_owner_idx0;
1790	struct mtk_rx_dma_v2 *rxd;
1791
1792	rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;
1793
1794	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1795	    MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
1796		rxd->rxd2 = PDMA_V2_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
1797	else
1798		rxd->rxd2 = PDMA_V1_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
1799
1800	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), idx);
1801	priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;
1802
1803	return 0;
1804}
1805
1806static int mtk_eth_probe(struct udevice *dev)
1807{
1808	struct eth_pdata *pdata = dev_get_plat(dev);
1809	struct mtk_eth_priv *priv = dev_get_priv(dev);
1810	ulong iobase = pdata->iobase;
1811	int ret;
1812
1813	/* Frame Engine Register Base */
1814	priv->fe_base = (void *)iobase;
1815
1816	/* GMAC Register Base */
1817	priv->gmac_base = (void *)(iobase + GMAC_BASE);
1818
1819	/* MDIO register */
1820	ret = mtk_mdio_register(dev);
1821	if (ret)
1822		return ret;
1823
1824	/* Prepare for tx/rx rings */
1825	priv->tx_ring_noc = (void *)
1826		noncached_alloc(priv->soc->txd_size * NUM_TX_DESC,
1827				ARCH_DMA_MINALIGN);
1828	priv->rx_ring_noc = (void *)
1829		noncached_alloc(priv->soc->rxd_size * NUM_RX_DESC,
1830				ARCH_DMA_MINALIGN);
1831
1832	/* Set MDC divider */
1833	mtk_eth_mdc_init(priv);
1834
1835	/* Set MAC mode */
1836	if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII ||
1837	    priv->phy_interface == PHY_INTERFACE_MODE_XGMII)
1838		mtk_xmac_init(priv);
1839	else
1840		mtk_mac_init(priv);
1841
1842	/* Probe phy if switch is not specified */
1843	if (priv->sw == SW_NONE)
1844		return mtk_phy_probe(dev);
1845
1846	/* Initialize switch */
1847	return mt753x_switch_init(priv);
1848}
1849
1850static int mtk_eth_remove(struct udevice *dev)
1851{
1852	struct mtk_eth_priv *priv = dev_get_priv(dev);
1853
1854	/* MDIO unregister */
1855	mdio_unregister(priv->mdio_bus);
1856	mdio_free(priv->mdio_bus);
1857
1858	/* Stop possibly started DMA */
1859	mtk_eth_stop(dev);
1860
1861	return 0;
1862}
1863
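/*
 * Properties consumed below, shown as an illustrative sketch only (node name,
 * unit address, compatible string and reset specifier are placeholders; see
 * the dt-bindings documentation for the authoritative binding):
 *
 *	eth: ethernet@15100000 {
 *		compatible = "mediatek,<soc>-eth";
 *		mediatek,gmac-id = <0>;
 *		mediatek,ethsys = <&ethsys>;
 *		mediatek,sgmiisys = <&sgmiisys>;
 *		mediatek,switch = "mt7531";
 *		phy-mode = "2500base-x";
 *		resets = <&ethsys ...>;
 *		reset-names = "fe";
 *		fixed-link {
 *			speed = <2500>;
 *			full-duplex;
 *		};
 *		mdio {
 *			clock-frequency = <2500000>;
 *		};
 *	};
 */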
1864static int mtk_eth_of_to_plat(struct udevice *dev)
1865{
1866	struct eth_pdata *pdata = dev_get_plat(dev);
1867	struct mtk_eth_priv *priv = dev_get_priv(dev);
1868	struct ofnode_phandle_args args;
1869	struct regmap *regmap;
1870	const char *str;
1871	ofnode subnode;
1872	int ret;
1873
1874	priv->soc = (const struct mtk_soc_data *)dev_get_driver_data(dev);
1875	if (!priv->soc) {
1876		dev_err(dev, "missing soc compatible data\n");
1877		return -EINVAL;
1878	}
1879
1880	pdata->iobase = (phys_addr_t)dev_remap_addr(dev);
1881
1882	/* get corresponding ethsys phandle */
1883	ret = dev_read_phandle_with_args(dev, "mediatek,ethsys", NULL, 0, 0,
1884					 &args);
1885	if (ret)
1886		return ret;
1887
1888	priv->ethsys_regmap = syscon_node_to_regmap(args.node);
1889	if (IS_ERR(priv->ethsys_regmap))
1890		return PTR_ERR(priv->ethsys_regmap);
1891
1892	if (MTK_HAS_CAPS(priv->soc->caps, MTK_INFRA)) {
1893		/* get corresponding infracfg phandle */
1894		ret = dev_read_phandle_with_args(dev, "mediatek,infracfg",
1895						 NULL, 0, 0, &args);
1896
1897		if (ret)
1898			return ret;
1899
1900		priv->infra_regmap = syscon_node_to_regmap(args.node);
1901		if (IS_ERR(priv->infra_regmap))
1902			return PTR_ERR(priv->infra_regmap);
1903	}
1904
1905	/* Reset controllers */
1906	ret = reset_get_by_name(dev, "fe", &priv->rst_fe);
1907	if (ret) {
1908		printf("error: Unable to get reset ctrl for frame engine\n");
1909		return ret;
1910	}
1911
1912	priv->gmac_id = dev_read_u32_default(dev, "mediatek,gmac-id", 0);
1913
	priv->mdc = 0;
	subnode = ofnode_find_subnode(dev_ofnode(dev), "mdio");
	if (ofnode_valid(subnode)) {
		priv->mdc = ofnode_read_u32_default(subnode, "clock-frequency",
						    2500000);
		if (priv->mdc > MDC_MAX_FREQ ||
		    priv->mdc < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
			printf("error: MDIO clock frequency out of range\n");
			return -EINVAL;
		}
	}

	/* Interface mode is required */
	pdata->phy_interface = dev_read_phy_mode(dev);
	priv->phy_interface = pdata->phy_interface;
	if (pdata->phy_interface == PHY_INTERFACE_MODE_NA) {
		printf("error: phy-mode is not set\n");
		return -EINVAL;
	}

	/* Force mode or autoneg */
	subnode = ofnode_find_subnode(dev_ofnode(dev), "fixed-link");
	if (ofnode_valid(subnode)) {
		priv->force_mode = 1;
		priv->speed = ofnode_read_u32_default(subnode, "speed", 0);
		priv->duplex = ofnode_read_bool(subnode, "full-duplex");

		if (priv->speed != SPEED_10 && priv->speed != SPEED_100 &&
		    priv->speed != SPEED_1000 && priv->speed != SPEED_2500 &&
		    priv->speed != SPEED_10000) {
			printf("error: no valid speed set in fixed-link\n");
			return -EINVAL;
		}
	}

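	/*
	 * PCS wiring: SGMII/2500Base-X links only need the sgmiisys block,
	 * while USXGMII additionally requires the USXGMII PCS, XFI PEXTP
	 * and XFI PLL blocks plus the toprgu reset controller.
	 */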
	if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    priv->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
		/* get corresponding sgmii phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,sgmiisys",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		regmap = syscon_node_to_regmap(args.node);

		if (IS_ERR(regmap))
			return PTR_ERR(regmap);

		priv->sgmii_base = regmap_get_range(regmap, 0);

		if (!priv->sgmii_base) {
			dev_err(dev, "Unable to find sgmii\n");
			return -ENODEV;
		}

		priv->pn_swap = ofnode_read_bool(args.node, "pn_swap");
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII) {
		/* get corresponding usxgmii phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,usxgmiisys",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		priv->usxgmii_regmap = syscon_node_to_regmap(args.node);
		if (IS_ERR(priv->usxgmii_regmap))
			return PTR_ERR(priv->usxgmii_regmap);

		/* get corresponding xfi_pextp phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,xfi_pextp",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		priv->xfi_pextp_regmap = syscon_node_to_regmap(args.node);
		if (IS_ERR(priv->xfi_pextp_regmap))
			return PTR_ERR(priv->xfi_pextp_regmap);

		/* get corresponding xfi_pll phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,xfi_pll",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		priv->xfi_pll_regmap = syscon_node_to_regmap(args.node);
		if (IS_ERR(priv->xfi_pll_regmap))
			return PTR_ERR(priv->xfi_pll_regmap);

		/* get corresponding toprgu phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,toprgu",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		priv->toprgu_regmap = syscon_node_to_regmap(args.node);
		if (IS_ERR(priv->toprgu_regmap))
			return PTR_ERR(priv->toprgu_regmap);
	}

	/* Check for a switch first; otherwise a PHY will be used */
	priv->sw = SW_NONE;
	priv->switch_init = NULL;
	priv->switch_mac_control = NULL;
	str = dev_read_string(dev, "mediatek,switch");

	if (str) {
		if (!strcmp(str, "mt7530")) {
			priv->sw = SW_MT7530;
			priv->switch_init = mt7530_setup;
			priv->switch_mac_control = mt7530_mac_control;
			priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
			priv->mt753x_reset_wait_time = 1000;
		} else if (!strcmp(str, "mt7531")) {
			priv->sw = SW_MT7531;
			priv->switch_init = mt7531_setup;
			priv->switch_mac_control = mt7531_mac_control;
			priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
			priv->mt753x_reset_wait_time = 200;
		} else if (!strcmp(str, "mt7988")) {
			priv->sw = SW_MT7988;
			priv->switch_init = mt7988_setup;
			priv->switch_mac_control = mt7988_mac_control;
			priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
			priv->mt753x_reset_wait_time = 50;
		} else {
			printf("error: unsupported switch\n");
			return -EINVAL;
		}

		priv->mcm = dev_read_bool(dev, "mediatek,mcm");
		if (priv->mcm) {
			ret = reset_get_by_name(dev, "mcm", &priv->rst_mcm);
			if (ret) {
				printf("error: no reset ctrl for mcm\n");
				return ret;
			}
		} else {
			gpio_request_by_name(dev, "reset-gpios", 0,
					     &priv->rst_gpio, GPIOD_IS_OUT);
		}
	} else {
		ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0,
						 0, &args);
		if (ret) {
			printf("error: phy-handle is not specified\n");
			return ret;
		}

		priv->phy_addr = ofnode_read_s32_default(args.node, "reg", -1);
		if (priv->phy_addr < 0) {
			printf("error: phy address is not specified\n");
			return -EINVAL;
		}
	}

	return 0;
}

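/*
 * Per-SoC mtk_soc_data instances. The newer SoCs (MT7981/MT7986/MT7988)
 * use the v2 DMA descriptor layout and a different PDMA register window
 * than the older parts, which keep the v1 descriptors.
 */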
static const struct mtk_soc_data mt7988_data = {
	.caps = MT7988_CAPS,
	.ana_rgc3 = 0x128,
	.gdma_count = 3,
	.pdma_base = PDMA_V3_BASE,
	.txd_size = sizeof(struct mtk_tx_dma_v2),
	.rxd_size = sizeof(struct mtk_rx_dma_v2),
};

static const struct mtk_soc_data mt7986_data = {
	.caps = MT7986_CAPS,
	.ana_rgc3 = 0x128,
	.gdma_count = 2,
	.pdma_base = PDMA_V2_BASE,
	.txd_size = sizeof(struct mtk_tx_dma_v2),
	.rxd_size = sizeof(struct mtk_rx_dma_v2),
};

static const struct mtk_soc_data mt7981_data = {
	.caps = MT7981_CAPS,
	.ana_rgc3 = 0x128,
	.gdma_count = 2,
	.pdma_base = PDMA_V2_BASE,
	.txd_size = sizeof(struct mtk_tx_dma_v2),
	.rxd_size = sizeof(struct mtk_rx_dma_v2),
};

static const struct mtk_soc_data mt7629_data = {
	.ana_rgc3 = 0x128,
	.gdma_count = 2,
	.pdma_base = PDMA_V1_BASE,
	.txd_size = sizeof(struct mtk_tx_dma),
	.rxd_size = sizeof(struct mtk_rx_dma),
};

static const struct mtk_soc_data mt7623_data = {
	.caps = MT7623_CAPS,
	.gdma_count = 2,
	.pdma_base = PDMA_V1_BASE,
	.txd_size = sizeof(struct mtk_tx_dma),
	.rxd_size = sizeof(struct mtk_rx_dma),
};

static const struct mtk_soc_data mt7622_data = {
	.ana_rgc3 = 0x2028,
	.gdma_count = 2,
	.pdma_base = PDMA_V1_BASE,
	.txd_size = sizeof(struct mtk_tx_dma),
	.rxd_size = sizeof(struct mtk_rx_dma),
};

static const struct mtk_soc_data mt7621_data = {
	.caps = MT7621_CAPS,
	.gdma_count = 2,
	.pdma_base = PDMA_V1_BASE,
	.txd_size = sizeof(struct mtk_tx_dma),
	.rxd_size = sizeof(struct mtk_rx_dma),
};

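/* Match table; .data points at the mtk_soc_data describing each SoC */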
static const struct udevice_id mtk_eth_ids[] = {
	{ .compatible = "mediatek,mt7988-eth", .data = (ulong)&mt7988_data },
	{ .compatible = "mediatek,mt7986-eth", .data = (ulong)&mt7986_data },
	{ .compatible = "mediatek,mt7981-eth", .data = (ulong)&mt7981_data },
	{ .compatible = "mediatek,mt7629-eth", .data = (ulong)&mt7629_data },
	{ .compatible = "mediatek,mt7623-eth", .data = (ulong)&mt7623_data },
	{ .compatible = "mediatek,mt7622-eth", .data = (ulong)&mt7622_data },
	{ .compatible = "mediatek,mt7621-eth", .data = (ulong)&mt7621_data },
	{}
};

static const struct eth_ops mtk_eth_ops = {
	.start = mtk_eth_start,
	.stop = mtk_eth_stop,
	.send = mtk_eth_send,
	.recv = mtk_eth_recv,
	.free_pkt = mtk_eth_free_pkt,
	.write_hwaddr = mtk_eth_write_hwaddr,
};

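/*
 * DM_FLAG_ALLOC_PRIV_DMA asks the uclass to allocate the private data,
 * which embeds the packet buffer pool, with DMA-safe alignment.
 */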
U_BOOT_DRIVER(mtk_eth) = {
	.name = "mtk-eth",
	.id = UCLASS_ETH,
	.of_match = mtk_eth_ids,
	.of_to_plat = mtk_eth_of_to_plat,
	.plat_auto = sizeof(struct eth_pdata),
	.probe = mtk_eth_probe,
	.remove = mtk_eth_remove,
	.ops = &mtk_eth_ops,
	.priv_auto = sizeof(struct mtk_eth_priv),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};