// SPDX-License-Identifier: GPL-2.0+

/*
 * Copyright (C) 2020 Cortina Access Inc.
 * Author: Aaron Tseng <aaron.tseng@cortina-access.com>
 *
 * Ethernet MAC Driver for all supported CAxxxx SoCs
 */

#include <command.h>
#include <dm.h>
#include <malloc.h>
#include <net.h>
#include <miiphy.h>
#include <env.h>
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <u-boot/crc.h>
#include <led.h>

#include "cortina_ni.h"

#define HEADER_A_SIZE	8

enum ca_led_state_t {
	CA_LED_OFF = 0,
	CA_LED_ON = 1,
};

enum ca_port_t {
	NI_PORT_0 = 0,
	NI_PORT_1,
	NI_PORT_2,
	NI_PORT_3,
	NI_PORT_4,
	NI_PORT_5,
	NI_PORT_MAX,
};

static struct udevice *curr_dev;

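/*
 * Advance an XRAM read/write pointer by one 32-bit word, wrapping back to
 * the base of the window once the end is reached.
 */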
static u32 *ca_rdwrptr_adv_one(u32 *x, unsigned long base, unsigned long max)
{
	if (x + 1 >= (u32 *)max)
		return (u32 *)base;
	else
		return (x + 1);
}

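/*
 * NI registers are memory mapped and always accessed as 32-bit words
 * through the KSEG1_ATU_XLAT() address translation; callers pass a
 * pointer to a 32-bit register overlay structure.
 */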
static void ca_reg_read(void *reg, u64 base, u64 offset)
{
	u32 *val = (u32 *)reg;

	*val = readl(KSEG1_ATU_XLAT(base + offset));
}

static void ca_reg_write(void *reg, u64 base, u64 offset)
{
	u32 val = *(u32 *)reg;

	writel(val, KSEG1_ATU_XLAT(base + offset));
}

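/*
 * MDIO access through the PER_MDIO controller (external/RGMII PHYs):
 * program the PHY address, register offset and direction, write the data,
 * kick "mdiostart", then poll "mdiodone" until the cycle completes or the
 * timeout expires.
 */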
static int ca_mdio_write_rgmii(u32 addr, u32 offset, u16 data)
{
	/* up to 10000 cycles */
	u32 loop_wait = __MDIO_ACCESS_TIMEOUT;
	u32 wrdata = data;
	struct PER_MDIO_ADDR_t mdio_addr;
	struct PER_MDIO_CTRL_t mdio_ctrl;
	struct cortina_ni_priv *priv = dev_get_priv(curr_dev);

	memset(&mdio_addr, 0, sizeof(mdio_addr));
	mdio_addr.mdio_addr = addr;
	mdio_addr.mdio_offset = offset;
	mdio_addr.mdio_rd_wr = __MDIO_WR_FLAG;
	ca_reg_write(&mdio_addr, (u64)priv->per_mdio_base_addr,
		     PER_MDIO_ADDR_OFFSET);
	/* write the data through a full 32-bit variable */
	ca_reg_write(&wrdata, (u64)priv->per_mdio_base_addr,
		     PER_MDIO_WRDATA_OFFSET);

	memset(&mdio_ctrl, 0, sizeof(mdio_ctrl));
	mdio_ctrl.mdiostart = 1;
	ca_reg_write(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
		     PER_MDIO_CTRL_OFFSET);

	debug("%s: phy_addr=%d, offset=%d, data=0x%x\n",
	      __func__, addr, offset, data);

	do {
		ca_reg_read(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
			    PER_MDIO_CTRL_OFFSET);
		if (mdio_ctrl.mdiodone) {
			ca_reg_write(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
				     PER_MDIO_CTRL_OFFSET);
			return 0;
		}
	} while (--loop_wait);

	printf("CA NI %s: PHY write timeout!!!\n", __func__);
	return -ETIMEDOUT;
}

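/*
 * Two MDIO paths exist: PHY addresses of 5 and above are reached through
 * the PER_MDIO controller above, while lower addresses belong to the
 * embedded GPHYs and are accessed through a direct register window whose
 * address is assembled from NI_MDIO_OPER_T.
 */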
int ca_mdio_write(u32 addr, u32 offset, u16 data)
{
	u32 reg_addr, reg_val;
	struct NI_MDIO_OPER_T mdio_oper;

	/* supported address range: 1~31 */
	if (addr < CA_MDIO_ADDR_MIN || addr > CA_MDIO_ADDR_MAX)
		return -EINVAL;

	/* PHY addresses of 5 and above are connected through RGMII */
	if (addr >= 5)
		return ca_mdio_write_rgmii(addr, offset, data);

	memset(&mdio_oper, 0, sizeof(mdio_oper));
	mdio_oper.reg_off = offset;
	mdio_oper.phy_addr = addr;
	mdio_oper.reg_base = CA_NI_MDIO_REG_BASE;
	reg_val = data;
	memcpy(&reg_addr, &mdio_oper, sizeof(reg_addr));
	ca_reg_write(&reg_val, (u64)reg_addr, 0);

	return 0;
}

125
126static int ca_mdio_read_rgmii(u32 addr, u32 offset, u16 *data)
127{
128	u32 loop_wait = __MDIO_ACCESS_TIMEOUT;
129	struct PER_MDIO_ADDR_t mdio_addr;
130	struct PER_MDIO_CTRL_t mdio_ctrl;
131	struct PER_MDIO_RDDATA_t read_data;
132	struct cortina_ni_priv *priv = dev_get_priv(curr_dev);
133
134	memset(&mdio_addr, 0, sizeof(mdio_addr));
135	mdio_addr.mdio_addr = addr;
136	mdio_addr.mdio_offset = offset;
137	mdio_addr.mdio_rd_wr = __MDIO_RD_FLAG;
138	ca_reg_write(&mdio_addr, (u64)priv->per_mdio_base_addr,
139		     PER_MDIO_ADDR_OFFSET);
140
141	memset(&mdio_ctrl, 0, sizeof(mdio_ctrl));
142	mdio_ctrl.mdiostart = 1;
143	ca_reg_write(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
144		     PER_MDIO_CTRL_OFFSET);
145
146	do {
147		ca_reg_read(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
148			    PER_MDIO_CTRL_OFFSET);
149		if (mdio_ctrl.mdiodone) {
150			ca_reg_write(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
151				     PER_MDIO_CTRL_OFFSET);
152			ca_reg_read(&read_data, (u64)priv->per_mdio_base_addr,
153				    PER_MDIO_RDDATA_OFFSET);
154			*data = read_data.mdio_rddata;
155			return 0;
156		}
157	} while (--loop_wait);
158
159	printf("CA NI %s: TIMEOUT!!\n", __func__);
160	return -ETIMEDOUT;
161}
162
163int ca_mdio_read(u32 addr, u32 offset, u16 *data)
164{
165	u32 reg_addr, reg_val;
166	struct NI_MDIO_OPER_T mdio_oper;
167
168	if (!data)
169		return -EINVAL;
170
171	/* support range: 1~31*/
172	if (addr < CA_MDIO_ADDR_MIN || addr > CA_MDIO_ADDR_MAX)
173		return -EINVAL;
174
175	/* the phy addr 5 is connect to RGMII */
176	if (addr >= 5)
177		return ca_mdio_read_rgmii(addr, offset, data);
178
179	memset(&mdio_oper, 0, sizeof(mdio_oper));
180	mdio_oper.reg_off = offset;
181	mdio_oper.phy_addr = addr;
182	mdio_oper.reg_base = CA_NI_MDIO_REG_BASE;
183	reg_val = *data;
184	memcpy(&reg_addr, &mdio_oper, sizeof(reg_addr));
185	ca_reg_read(&reg_val, (u64)reg_addr, 0);
186	*data = reg_val;
187	return 0;
188}
189
int ca_miiphy_read(const char *devname, u8 addr, u8 reg, u16 *value)
{
	return ca_mdio_read(addr, reg, value);
}

int ca_miiphy_write(const char *devname, u8 addr, u8 reg, u16 value)
{
	return ca_mdio_write(addr, reg, value);
}

static int cortina_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	u16 data;
	int ret;

	ret = ca_mdio_read(addr, reg, &data);
	if (ret)
		return ret;

	return data;
}

static int cortina_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			      u16 val)
{
	return ca_mdio_write(addr, reg, val);
}

static void ca_ni_setup_mac_addr(void)
{
	u8 mac[6];
	struct NI_HV_GLB_MAC_ADDR_CFG0_t mac_addr_cfg0;
	struct NI_HV_GLB_MAC_ADDR_CFG1_t mac_addr_cfg1;
	struct NI_HV_PT_PORT_STATIC_CFG_t port_static_cfg;
	struct NI_HV_XRAM_CPUXRAM_CFG_t cpuxram_cfg;
	struct cortina_ni_priv *priv = dev_get_priv(curr_dev);

	/* parse ethaddr and program it into the NI registers */
	if (eth_env_get_enetaddr("ethaddr", mac)) {
		/* The complete MAC address consists of
		 * {MAC_ADDR0_mac_addr0[0-3], MAC_ADDR1_mac_addr1[4],
		 * PT_PORT_STATIC_CFG_mac_addr6[5]}.
		 */
		memset(&mac_addr_cfg0, 0, sizeof(mac_addr_cfg0));
		mac_addr_cfg0.mac_addr0 = (mac[0] << 24) + (mac[1] << 16) +
					  (mac[2] << 8) + mac[3];
		ca_reg_write(&mac_addr_cfg0, (u64)priv->ni_hv_base_addr,
			     NI_HV_GLB_MAC_ADDR_CFG0_OFFSET);

		memset(&mac_addr_cfg1, 0, sizeof(mac_addr_cfg1));
		mac_addr_cfg1.mac_addr1 = mac[4];
		ca_reg_write(&mac_addr_cfg1, (u64)priv->ni_hv_base_addr,
			     NI_HV_GLB_MAC_ADDR_CFG1_OFFSET);

		ca_reg_read(&port_static_cfg, (u64)priv->ni_hv_base_addr,
			    NI_HV_PT_PORT_STATIC_CFG_OFFSET +
			    (APB0_NI_HV_PT_STRIDE * priv->active_port));

		port_static_cfg.mac_addr6 = mac[5];
		ca_reg_write(&port_static_cfg, (u64)priv->ni_hv_base_addr,
			     NI_HV_PT_PORT_STATIC_CFG_OFFSET +
			     (APB0_NI_HV_PT_STRIDE * priv->active_port));

		/* receive only broadcast and address-matched packets */
		ca_reg_read(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
			    NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
		cpuxram_cfg.xram_mgmt_promisc_mode = 0;
		cpuxram_cfg.rx_0_cpu_pkt_dis = 0;
		cpuxram_cfg.tx_0_cpu_pkt_dis = 0;
		ca_reg_write(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
			     NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
	} else {
		/* receive all packets (promiscuous mode) */
		ca_reg_read(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
			    NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
		cpuxram_cfg.xram_mgmt_promisc_mode = 3;
		cpuxram_cfg.rx_0_cpu_pkt_dis = 0;
		cpuxram_cfg.tx_0_cpu_pkt_dis = 0;
		ca_reg_write(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
			     NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
	}
}

static void ca_ni_enable_tx_rx(void)
{
	struct NI_HV_PT_RXMAC_CFG_t rxmac_cfg;
	struct NI_HV_PT_TXMAC_CFG_t txmac_cfg;
	struct cortina_ni_priv *priv = dev_get_priv(curr_dev);

	/* Enable TX and RX functions */
	ca_reg_read(&rxmac_cfg, (u64)priv->ni_hv_base_addr,
		    NI_HV_PT_RXMAC_CFG_OFFSET +
		    (APB0_NI_HV_PT_STRIDE * priv->active_port));
	rxmac_cfg.rx_en = 1;
	ca_reg_write(&rxmac_cfg, (u64)priv->ni_hv_base_addr,
		     NI_HV_PT_RXMAC_CFG_OFFSET +
		     (APB0_NI_HV_PT_STRIDE * priv->active_port));

	ca_reg_read(&txmac_cfg, (u64)priv->ni_hv_base_addr,
		    NI_HV_PT_TXMAC_CFG_OFFSET +
		    (APB0_NI_HV_PT_STRIDE * priv->active_port));
	txmac_cfg.tx_en = 1;
	ca_reg_write(&txmac_cfg, (u64)priv->ni_hv_base_addr,
		     NI_HV_PT_TXMAC_CFG_OFFSET +
		     (APB0_NI_HV_PT_STRIDE * priv->active_port));
}

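/*
 * Scan all mapped PHYs and take the first one that reports link-up as the
 * active port. Register 1 is the standard MII BMSR; bit 2 (0x04) is the
 * link-status bit.
 */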
#define AUTO_SCAN_TIMEOUT 3000 /* 3 seconds */
static int ca_ni_auto_scan_active_port(struct cortina_ni_priv *priv)
{
	u8 i;
	u16 data;
	u32 start_time;

	start_time = get_timer(0);
	while (get_timer(start_time) < AUTO_SCAN_TIMEOUT) {
		for (i = 0; i < priv->valid_port_num; i++) {
			if (!priv->port_map[i].phy_addr)
				continue;

			ca_mdio_read(priv->port_map[i].phy_addr, 1, &data);
			if (data & 0x04) {
				priv->active_port = priv->port_map[i].port;
				return 0;
			}
		}
	}

	printf("CA NI %s: auto scan active_port timeout.\n", __func__);
	return -1;
}

static void ca_ni_led(int port, int status)
{
	char label[10];
	struct udevice *led_dev;

	if (IS_ENABLED(CONFIG_LED_CORTINA)) {
		snprintf(label, sizeof(label), "led%d", port);
		debug("%s: set port %d led %s.\n",
		      __func__, port, status ? "on" : "off");
		if (led_get_by_label(label, &led_dev)) {
			debug("%s: LED %s not found\n", __func__, label);
			return;
		}
		led_set_state(led_dev, status);
	}
}

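/*
 * Reset the NI block: pulse the global block reset, wait for ni_init_done,
 * release the interface and MAC resets of the active port only, and route
 * that GMAC to the CPU.
 */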
static void ca_ni_reset(void)
{
	int i;
	struct NI_HV_GLB_INIT_DONE_t init_done;
	struct NI_HV_GLB_INTF_RST_CONFIG_t intf_rst_config;
	struct NI_HV_GLB_STATIC_CFG_t static_cfg;
	struct GLOBAL_BLOCK_RESET_t glb_blk_reset;
	struct cortina_ni_priv *priv = dev_get_priv(curr_dev);

	/* NI global resets */
	ca_reg_read(&glb_blk_reset, (u64)priv->glb_base_addr,
		    GLOBAL_BLOCK_RESET_OFFSET);
	glb_blk_reset.reset_ni = 1;
	ca_reg_write(&glb_blk_reset, (u64)priv->glb_base_addr,
		     GLOBAL_BLOCK_RESET_OFFSET);
	/* Remove resets */
	glb_blk_reset.reset_ni = 0;
	ca_reg_write(&glb_blk_reset, (u64)priv->glb_base_addr,
		     GLOBAL_BLOCK_RESET_OFFSET);

	/* check the ready bit of NI module */
	for (i = 0; i < NI_READ_POLL_COUNT; i++) {
		ca_reg_read(&init_done, (u64)priv->ni_hv_base_addr,
			    NI_HV_GLB_INIT_DONE_OFFSET);
		if (init_done.ni_init_done)
			break;
	}
	if (i == NI_READ_POLL_COUNT) {
		printf("CA NI %s: NI init done not ready, init_done=0x%x!!!\n",
		       __func__, init_done.ni_init_done);
	}

	ca_reg_read(&intf_rst_config, (u64)priv->ni_hv_base_addr,
		    NI_HV_GLB_INTF_RST_CONFIG_OFFSET);
	switch (priv->active_port) {
	case NI_PORT_0:
		intf_rst_config.intf_rst_p0 = 0;
		intf_rst_config.mac_rx_rst_p0 = 0;
		intf_rst_config.mac_tx_rst_p0 = 0;
		break;
	case NI_PORT_1:
		intf_rst_config.intf_rst_p1 = 0;
		intf_rst_config.mac_rx_rst_p1 = 0;
		intf_rst_config.mac_tx_rst_p1 = 0;
		break;
	case NI_PORT_2:
		intf_rst_config.intf_rst_p2 = 0;
		intf_rst_config.mac_rx_rst_p2 = 0;
		intf_rst_config.mac_tx_rst_p2 = 0;
		break;
	case NI_PORT_3:
		intf_rst_config.intf_rst_p3 = 0;
		intf_rst_config.mac_tx_rst_p3 = 0;
		intf_rst_config.mac_rx_rst_p3 = 0;
		break;
	case NI_PORT_4:
		intf_rst_config.intf_rst_p4 = 0;
		intf_rst_config.mac_tx_rst_p4 = 0;
		intf_rst_config.mac_rx_rst_p4 = 0;
		break;
	}

	ca_reg_write(&intf_rst_config, (u64)priv->ni_hv_base_addr,
		     NI_HV_GLB_INTF_RST_CONFIG_OFFSET);

	/* Only one GMAC can connect to CPU */
	ca_reg_read(&static_cfg, (u64)priv->ni_hv_base_addr,
		    NI_HV_GLB_STATIC_CFG_OFFSET);
	static_cfg.port_to_cpu = priv->active_port;
	static_cfg.txmib_mode = 1;
	static_cfg.rxmib_mode = 1;

	ca_reg_write(&static_cfg, (u64)priv->ni_hv_base_addr,
		     NI_HV_GLB_STATIC_CFG_OFFSET);
}

static void ca_internal_gphy_cal(struct cortina_ni_priv *priv)
{
	int i, port, num;
	u32 reg_off, value;

	num = priv->gphy_num;
	for (port = 0; port < 4; port++) {
		for (i = 0; i < num; i++) {
			reg_off = priv->gphy_values[i].reg_off + (port * 0x80);
			value = priv->gphy_values[i].value;
			ca_reg_write(&value, reg_off, 0);
			mdelay(50);
		}
	}
}

static int ca_mdio_register(struct udevice *dev)
{
	int ret;
	struct cortina_ni_priv *priv = dev_get_priv(dev);
	struct mii_dev *mdio_bus = mdio_alloc();

	if (!mdio_bus)
		return -ENOMEM;

	mdio_bus->read = cortina_mdio_read;
	mdio_bus->write = cortina_mdio_write;
	snprintf(mdio_bus->name, sizeof(mdio_bus->name), "%s", dev->name);

	mdio_bus->priv = (void *)priv;

	ret = mdio_register(mdio_bus);
	if (ret)
		return ret;

	priv->mdio_bus = mdio_bus;
	return 0;
}

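/*
 * RGMII port setup: output the 25 MHz reference clock for the external
 * switch, toggle its external reset, and configure the RGMII pad drive
 * strength.
 */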
static void ca_rgmii_init(struct cortina_ni_priv *priv)
{
	struct GLOBAL_GLOBAL_CONFIG_t	glb_config;
	struct GLOBAL_IO_DRIVE_CONTROL_t io_drive_control;

	/* Generating 25MHz reference clock for switch */
	ca_reg_read(&glb_config, (u64)priv->glb_base_addr,
		    GLOBAL_GLOBAL_CONFIG_OFFSET);
	glb_config.refclk_sel = 0x01;
	glb_config.ext_reset = 0x01;
	ca_reg_write(&glb_config, (u64)priv->glb_base_addr,
		     GLOBAL_GLOBAL_CONFIG_OFFSET);

	mdelay(20);

	/* Do external reset */
	ca_reg_read(&glb_config, (u64)priv->glb_base_addr,
		    GLOBAL_GLOBAL_CONFIG_OFFSET);
	glb_config.ext_reset = 0x0;
	ca_reg_write(&glb_config, (u64)priv->glb_base_addr,
		     GLOBAL_GLOBAL_CONFIG_OFFSET);

	ca_reg_read(&io_drive_control, (u64)priv->glb_base_addr,
		    GLOBAL_IO_DRIVE_CONTROL_OFFSET);
	io_drive_control.gmac_mode = 2;
	io_drive_control.gmac_dn = 1;
	io_drive_control.gmac_dp = 1;
	ca_reg_write(&io_drive_control, (u64)priv->glb_base_addr,
		     GLOBAL_IO_DRIVE_CONTROL_OFFSET);
}

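/*
 * Connect the internal PHY (port 3) and the external PHY (port 4), then
 * choose the active port: either by scanning for the first link-up port
 * (when the "auto_scan_active_port" environment variable is set) or from
 * the "active_port" environment variable.
 */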
static int ca_phy_probe(struct udevice *dev)
{
	int auto_scan_active_port = 0, tmp_port;
	char *buf;
	struct cortina_ni_priv *priv = dev_get_priv(dev);
	struct phy_device *int_phydev, *ext_phydev;

	/* Initialize internal phy device */
	int_phydev = phy_connect(priv->mdio_bus,
				 priv->port_map[NI_PORT_3].phy_addr,
				 dev, priv->phy_interface);
	if (int_phydev) {
		int_phydev->supported &= PHY_GBIT_FEATURES;
		int_phydev->advertising = int_phydev->supported;
		phy_config(int_phydev);
	} else {
		printf("CA NI %s: There is no internal phy device\n", __func__);
	}

	/* Initialize external phy device */
	ext_phydev = phy_connect(priv->mdio_bus,
				 priv->port_map[NI_PORT_4].phy_addr,
				 dev, priv->phy_interface);
	if (ext_phydev) {
		ext_phydev->supported &= PHY_GBIT_FEATURES;
		ext_phydev->advertising = ext_phydev->supported;
		phy_config(ext_phydev);
	} else {
		printf("CA NI %s: There is no external phy device\n", __func__);
	}

	/* auto-scan the first link-up port as active_port */
	buf = env_get("auto_scan_active_port");
	if (buf != 0) {
		auto_scan_active_port = simple_strtoul(buf, NULL, 0);
		printf("CA NI %s: auto_scan_active_port=%d\n", __func__,
		       auto_scan_active_port);
	}

	if (auto_scan_active_port) {
		ca_ni_auto_scan_active_port(priv);
	} else {
		buf = env_get("active_port");
		if (buf != 0) {
			tmp_port = simple_strtoul(buf, NULL, 0);
			if (tmp_port < 0 ||
			    !(priv->valid_port_map & BIT(tmp_port))) {
				printf("CA NI ERROR: unsupported port %d\n",
				       tmp_port);
				return -EINVAL;
			}

			priv->active_port = tmp_port;
		}
	}

	printf("CA NI %s: active_port=%d\n", __func__, priv->active_port);
	if (priv->active_port == NI_PORT_4)
		priv->phydev = ext_phydev;
	else
		priv->phydev = int_phydev;

	return 0;
}

static int cortina_eth_start(struct udevice *dev)
{
	int ret;
	struct NI_HV_XRAM_CPUXRAM_ADRCFG_RX_t cpuxram_adrcfg_rx;
	struct NI_HV_XRAM_CPUXRAM_ADRCFG_TX_0_t cpuxram_adrcfg_tx;
	struct NI_HV_XRAM_CPUXRAM_CFG_t	cpuxram_cfg;
	struct NI_HV_PT_PORT_STATIC_CFG_t port_static_cfg;
	struct NI_HV_PT_PORT_GLB_CFG_t port_glb_cfg;
	struct cortina_ni_priv *priv = dev_get_priv(dev);
	struct phy_device *phydev = priv->phydev;

	ret = phy_startup(priv->phydev);
	if (ret) {
		ca_ni_led(priv->active_port, CA_LED_OFF);
		printf("CA NI Could not initialize PHY %s, active_port=%d\n",
		       priv->phydev->dev->name, priv->active_port);
		return ret;
	}

	if (!priv->phydev->link) {
		printf("CA NI %s: link down.\n", priv->phydev->dev->name);
		return 0;
	}

	ca_ni_led(priv->active_port, CA_LED_ON);
	printf("CA NI PHY ID 0x%08X %dMbps %s duplex\n",
	       phydev->phy_id, phydev->speed,
	       phydev->duplex == DUPLEX_HALF ? "half" : "full");

	/* RX XRAM ADDRESS CONFIG (start and end address) */
	memset(&cpuxram_adrcfg_rx, 0, sizeof(cpuxram_adrcfg_rx));
	cpuxram_adrcfg_rx.rx_top_addr = RX_TOP_ADDR;
	cpuxram_adrcfg_rx.rx_base_addr = RX_BASE_ADDR;
	ca_reg_write(&cpuxram_adrcfg_rx, (u64)priv->ni_hv_base_addr,
		     NI_HV_XRAM_CPUXRAM_ADRCFG_RX_OFFSET);

	/* TX XRAM ADDRESS CONFIG (start and end address) */
	memset(&cpuxram_adrcfg_tx, 0, sizeof(cpuxram_adrcfg_tx));
	cpuxram_adrcfg_tx.tx_top_addr = TX_TOP_ADDR;
	cpuxram_adrcfg_tx.tx_base_addr = TX_BASE_ADDR;
	ca_reg_write(&cpuxram_adrcfg_tx, (u64)priv->ni_hv_base_addr,
		     NI_HV_XRAM_CPUXRAM_ADRCFG_TX_0_OFFSET);

	/*
	 * Configuration for Management Ethernet Interface:
	 * - RGMII 1000 mode or RGMII 100 mode
	 * - MAC mode
	 */
	ca_reg_read(&port_static_cfg, (u64)priv->ni_hv_base_addr,
		    NI_HV_PT_PORT_STATIC_CFG_OFFSET +
		    (APB0_NI_HV_PT_STRIDE * priv->active_port));
	if (phydev->speed == SPEED_1000) {
		/* port 4 connects to RGMII PHY */
		if (phydev->addr == 5)
			port_static_cfg.int_cfg = GE_MAC_INTF_RGMII_1000;
		else
			port_static_cfg.int_cfg = GE_MAC_INTF_GMII;
	} else {
		/* port 4 connects to RGMII PHY */
		if (phydev->addr == 5)
			port_static_cfg.int_cfg = GE_MAC_INTF_RGMII_100;
		else
			port_static_cfg.int_cfg = GE_MAC_INTF_MII;
	}

	ca_reg_write(&port_static_cfg, (u64)priv->ni_hv_base_addr,
		     NI_HV_PT_PORT_STATIC_CFG_OFFSET +
		     (APB0_NI_HV_PT_STRIDE * priv->active_port));

	ca_reg_read(&port_glb_cfg, (u64)priv->ni_hv_base_addr,
		    NI_HV_PT_PORT_GLB_CFG_OFFSET +
		    (APB0_NI_HV_PT_STRIDE * priv->active_port));
	port_glb_cfg.speed = phydev->speed == SPEED_10 ? 1 : 0;
	port_glb_cfg.duplex = phydev->duplex == DUPLEX_HALF ? 1 : 0;
	ca_reg_write(&port_glb_cfg, (u64)priv->ni_hv_base_addr,
		     NI_HV_PT_PORT_GLB_CFG_OFFSET +
		     (APB0_NI_HV_PT_STRIDE * priv->active_port));

	/*
	 * Need to toggle the tx and rx cpu_pkt_dis bits
	 * after changing the address config registers.
	 */
	ca_reg_read(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
		    NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
	cpuxram_cfg.rx_0_cpu_pkt_dis = 1;
	cpuxram_cfg.tx_0_cpu_pkt_dis = 1;
	ca_reg_write(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
		     NI_HV_XRAM_CPUXRAM_CFG_OFFSET);

	ca_reg_read(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
		    NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
	cpuxram_cfg.rx_0_cpu_pkt_dis = 0;
	cpuxram_cfg.tx_0_cpu_pkt_dis = 0;
	ca_reg_write(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
		     NI_HV_XRAM_CPUXRAM_CFG_OFFSET);

	ca_ni_enable_tx_rx();

	return 0;
}

/*********************************************
 * Packet receive routine from Management FE
 * Expects a previously allocated buffer and
 * fills the length
 * Returns 0 on success, -1 on failure
 *******************************************/
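/*
 * The RX XRAM is organised as a ring of 8-byte entries. Each received
 * frame starts with a Header XR word (ownership, bytes_valid, next_link),
 * followed by a packet-status word and then the packet data. The software
 * read pointer chases the hardware write pointer; writing the next_link
 * value back to CPUXRAM_CPU_CFG_RX_0 releases the entry to the hardware.
 */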
static int cortina_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	u8 *ptr;
	u32 next_link, pktlen = 0;
	u32 sw_rx_rd_ptr, hw_rx_wr_ptr, *rx_xram_ptr, *data_ptr;
	int loop, index = 0, blk_num;
	struct cortina_ni_priv *priv = dev_get_priv(dev);
	struct NI_HEADER_X_T header_x;
	struct NI_PACKET_STATUS packet_status;
	struct NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0_t cpuxram_cpu_sta_rx;
	struct NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_t cpuxram_cpu_cfg_rx;

	/* get the hw write pointer */
	memset(&cpuxram_cpu_sta_rx, 0, sizeof(cpuxram_cpu_sta_rx));
	ca_reg_read(&cpuxram_cpu_sta_rx, (u64)priv->ni_hv_base_addr,
		    NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0_OFFSET);
	hw_rx_wr_ptr = cpuxram_cpu_sta_rx.pkt_wr_ptr;

	/* get the sw read pointer */
	memset(&cpuxram_cpu_cfg_rx, 0, sizeof(cpuxram_cpu_cfg_rx));
	ca_reg_read(&cpuxram_cpu_cfg_rx, (u64)priv->ni_hv_base_addr,
		    NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
	sw_rx_rd_ptr = cpuxram_cpu_cfg_rx.pkt_rd_ptr;

	debug("%s: NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0 = 0x%p, ", __func__,
	      priv->ni_hv_base_addr + NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0_OFFSET);
	debug("NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0 = 0x%p\n",
	      priv->ni_hv_base_addr + NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
	debug("%s : RX hw_wr_ptr = %d, sw_rd_ptr = %d\n",
	      __func__, hw_rx_wr_ptr, sw_rx_rd_ptr);

	while (sw_rx_rd_ptr != hw_rx_wr_ptr) {
		/* Point to the absolute memory address of XRAM
		 * where read pointer is
		 */
		rx_xram_ptr = (u32 *)
			      ((unsigned long)priv->ni_xram_base
			       + sw_rx_rd_ptr * 8);

		/* Wrap around if required */
		if (rx_xram_ptr >= (u32 *)(unsigned long)priv->rx_xram_end_adr)
			rx_xram_ptr = (u32 *)
				      (unsigned long)priv->rx_xram_base_adr;

		/* Checking header XR. Do not update the read pointer yet */
		/* skip unused 32-bit in Header XR */
		rx_xram_ptr = ca_rdwrptr_adv_one(rx_xram_ptr,
						 priv->rx_xram_base_adr,
						 priv->rx_xram_end_adr);

		memcpy(&header_x, rx_xram_ptr, sizeof(header_x));
		next_link = header_x.next_link;
		/* Header XR [31:0] */

		if (*rx_xram_ptr == 0xffffffff)
			printf("CA NI %s: XRAM Error !\n", __func__);

		debug("%s : RX next link 0x%x\n", __func__, next_link);
		debug("%s : bytes_valid %x\n", __func__, header_x.bytes_valid);

		if (header_x.ownership == 0) {
			/* point to Packet status [31:0] */
			rx_xram_ptr = ca_rdwrptr_adv_one(rx_xram_ptr,
							 priv->rx_xram_base_adr,
							 priv->rx_xram_end_adr);

			memcpy(&packet_status, rx_xram_ptr,
			       sizeof(*rx_xram_ptr));
			if (packet_status.valid == 0) {
				debug("%s: Invalid Packet !!, ", __func__);
				debug("next_link=%d\n", next_link);

				/* Update the software read pointer */
				ca_reg_write(&next_link,
					     (u64)priv->ni_hv_base_addr,
					NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
				return 0;
			}

			if (packet_status.drop ||
			    packet_status.runt ||
			    packet_status.oversize ||
			    packet_status.jabber ||
			    packet_status.crc_error ||
			    packet_status.jumbo) {
				debug("%s: Error Packet!!, ", __func__);
				debug("next_link=%d\n", next_link);

				/* Update the software read pointer */
				ca_reg_write(&next_link,
					     (u64)priv->ni_hv_base_addr,
					NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
				return 0;
			}

			/* check whether the packet size is larger than 1518 */
			if (packet_status.packet_size > 1518) {
				debug("%s: Error Packet !! Packet size=%d, ",
				      __func__, packet_status.packet_size);
				debug("larger than 1518, next_link=%d\n",
				      next_link);

				/* Update the software read pointer */
				ca_reg_write(&next_link,
					     (u64)priv->ni_hv_base_addr,
					NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
				return 0;
			}

			rx_xram_ptr = ca_rdwrptr_adv_one(rx_xram_ptr,
							 priv->rx_xram_base_adr,
							 priv->rx_xram_end_adr);

			pktlen = packet_status.packet_size;

			debug("%s : rx packet length = %d\n",
			      __func__, packet_status.packet_size);

			rx_xram_ptr = ca_rdwrptr_adv_one(rx_xram_ptr,
							 priv->rx_xram_base_adr,
							 priv->rx_xram_end_adr);

			data_ptr = (u32 *)net_rx_packets[index];

			/* Read out the packet */
			/* Data is in little endian form in the XRAM */

			/* Send the packet to upper layer */

			debug("%s: packet data[]=", __func__);

			for (loop = 0; loop <= pktlen / 4; loop++) {
				ptr = (u8 *)rx_xram_ptr;
				if (loop < 10)
					debug("[0x%x]-[0x%x]-[0x%x]-[0x%x]",
					      ptr[0], ptr[1], ptr[2], ptr[3]);
				*data_ptr++ = *rx_xram_ptr++;
				/* Wrap around if required */
				if (rx_xram_ptr >= (u32 *)
				    (unsigned long)priv->rx_xram_end_adr) {
					rx_xram_ptr = (u32 *)(unsigned long)
						       (priv->rx_xram_base_adr);
				}
			}

			debug("\n");
			net_process_received_packet(net_rx_packets[index],
						    pktlen);
			if (++index >= PKTBUFSRX)
				index = 0;
			blk_num = net_rx_packets[index][0x2c] * 255 +
				net_rx_packets[index][0x2d];
			debug("%s: tftp block number=%d\n", __func__, blk_num);

			/* Update the software read pointer */
			ca_reg_write(&next_link,
				     (u64)priv->ni_hv_base_addr,
				     NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
		}

		/* get the hw write pointer */
		ca_reg_read(&cpuxram_cpu_sta_rx, (u64)priv->ni_hv_base_addr,
			    NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0_OFFSET);
		hw_rx_wr_ptr = cpuxram_cpu_sta_rx.pkt_wr_ptr;

		/* get the sw read pointer */
		ca_reg_read(&sw_rx_rd_ptr, (u64)priv->ni_hv_base_addr,
			    NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
	}
	return 0;
}

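/*
 * Transmit path: the frame is copied into a bounce buffer behind an 8-byte
 * header_A, rounded up so that payload plus CRC is 8-byte aligned, padded
 * to the minimum Ethernet frame size, and a CRC32 is appended (the MAC
 * recomputes the real FCS). The result is copied word by word into the TX
 * XRAM and handed to the hardware by publishing the new software write
 * pointer.
 */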
static int cortina_eth_send(struct udevice *dev, void *packet, int length)
{
	u32 hw_tx_rd_ptr = 0, sw_tx_wr_ptr = 0;
	u32 loop, new_pkt_len, ca_crc32;
	u32 *tx_xram_ptr, *data_ptr;
	u16 next_link = 0;
	u8 *ptr, *pkt_buf_ptr, valid_bytes = 0;
	int pad = 0;
	static u8 pkt_buf[2048];
	struct NI_HEADER_X_T hdr_xt;
	struct NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0_t cpuxram_cpu_cfg_tx;
	struct cortina_ni_priv *priv = dev_get_priv(dev);

	if (!packet || length > 2032)
		return -1;

	/* Get the hardware read pointer */
	ca_reg_read(&hw_tx_rd_ptr, (u64)priv->ni_hv_base_addr,
		    NI_HV_XRAM_CPUXRAM_CPU_STAT_TX_0_OFFSET);

	/* Get the software write pointer */
	ca_reg_read(&sw_tx_wr_ptr, (u64)priv->ni_hv_base_addr,
		    NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0_OFFSET);

	debug("%s: NI_HV_XRAM_CPUXRAM_CPU_STAT_TX_0=0x%p, ",
	      __func__,
	      KSEG1_ATU_XLAT(priv->ni_hv_base_addr +
			     NI_HV_XRAM_CPUXRAM_CPU_STAT_TX_0_OFFSET));
	debug("NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0=0x%p\n",
	      KSEG1_ATU_XLAT(priv->ni_hv_base_addr +
			     NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0_OFFSET));
	debug("%s : hw_tx_rd_ptr = %d\n", __func__, hw_tx_rd_ptr);
	debug("%s : sw_tx_wr_ptr = %d\n", __func__, sw_tx_wr_ptr);

	if (hw_tx_rd_ptr != sw_tx_wr_ptr) {
		printf("CA NI %s: Tx FIFO is not available!\n", __func__);
		return 1;
	}

	/* workaround (2015/10/01):
	 * the packet size plus CRC must be 8-byte aligned
	 */
	if (((length + 4) % 8) != 0)
		length += (8 - ((length + 4) % 8));

	memset(pkt_buf, 0x00, sizeof(pkt_buf));

	/* add 8-byte header_A at the beginning of packet */
	memcpy(&pkt_buf[HEADER_A_SIZE], (const void *)packet, length);

	pad = 64 - (length + 4);	/* if packet length < 60 */
	pad = (pad < 0) ? 0 : pad;

	debug("%s: length=%d, pad=%d\n", __func__, length, pad);

	new_pkt_len = length + pad;	/* new packet length */

	pkt_buf_ptr = (u8 *)pkt_buf;

	/* Calculate the CRC32, skip 8-byte header_A */
	ca_crc32 = crc32(0, (u8 *)(pkt_buf_ptr + HEADER_A_SIZE), new_pkt_len);

	debug("%s: crc32 is 0x%x\n", __func__, ca_crc32);
	debug("%s: ~crc32 is 0x%x\n", __func__, ~ca_crc32);
	debug("%s: pkt len %d\n", __func__, new_pkt_len);
	/* append the CRC behind the 8-byte header_A plus payload;
	 * the real FCS is recalculated by hardware
	 */
	memcpy((pkt_buf_ptr + new_pkt_len + HEADER_A_SIZE),
	       (u8 *)(&ca_crc32), sizeof(ca_crc32));
	new_pkt_len = new_pkt_len + 4;	/* add CRC */

	valid_bytes = new_pkt_len % 8;
	debug("%s: valid_bytes %d\n", __func__, valid_bytes);

	/* account for the 8-byte header_A, in 8-byte XRAM entries */
	next_link = sw_tx_wr_ptr +
		(new_pkt_len + 7 + HEADER_A_SIZE) / 8;
	/* plus one entry for the header XT */
	next_link = next_link + 1;
	/* Wrap around if required */
	if (next_link > priv->tx_xram_end) {
		next_link = priv->tx_xram_start +
			(next_link - (priv->tx_xram_end + 1));
	}

	debug("%s: TX next_link %x\n", __func__, next_link);
	memset(&hdr_xt, 0, sizeof(hdr_xt));
	hdr_xt.ownership = 1;
	hdr_xt.bytes_valid = valid_bytes;
	hdr_xt.next_link = next_link;

	tx_xram_ptr = (u32 *)((unsigned long)priv->ni_xram_base
		      + sw_tx_wr_ptr * 8);

	/* Wrap around if required */
	if (tx_xram_ptr >= (u32 *)(unsigned long)priv->tx_xram_end_adr)
		tx_xram_ptr = (u32 *)(unsigned long)priv->tx_xram_base_adr;

	tx_xram_ptr = ca_rdwrptr_adv_one(tx_xram_ptr,
					 priv->tx_xram_base_adr,
					 priv->tx_xram_end_adr);

	memcpy(tx_xram_ptr, &hdr_xt, sizeof(*tx_xram_ptr));

	tx_xram_ptr = ca_rdwrptr_adv_one(tx_xram_ptr,
					 priv->tx_xram_base_adr,
					 priv->tx_xram_end_adr);

	/* Now to copy the data. The first byte on the line goes first */
	data_ptr = (u32 *)pkt_buf_ptr;
	debug("%s: packet data[]=", __func__);

	/* copy header_A and the packet into XRAM */
	for (loop = 0; loop <= (new_pkt_len + HEADER_A_SIZE) / 4; loop++) {
		ptr = (u8 *)data_ptr;
		if ((loop % 4) == 0)
			debug("\n");
		debug("[0x%x]-[0x%x]-[0x%x]-[0x%x]-",
		      ptr[0], ptr[1], ptr[2], ptr[3]);

		*tx_xram_ptr = *data_ptr++;
		tx_xram_ptr = ca_rdwrptr_adv_one(tx_xram_ptr,
						 priv->tx_xram_base_adr,
						 priv->tx_xram_end_adr);
	}
	debug("\n");

	/* Publish the software write pointer */
	memset(&cpuxram_cpu_cfg_tx, 0, sizeof(cpuxram_cpu_cfg_tx));
	cpuxram_cpu_cfg_tx.pkt_wr_ptr = next_link;
	ca_reg_write(&cpuxram_cpu_cfg_tx,
		     (u64)priv->ni_hv_base_addr,
		     NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0_OFFSET);

	return 0;
}

static void cortina_eth_stop(struct udevice *netdev)
{
	/* Nothing to do for now. */
}

static int cortina_eth_probe(struct udevice *dev)
{
	int ret, reg_value;
	struct cortina_ni_priv *priv;

	priv = dev_get_priv(dev);
	priv->rx_xram_base_adr	= priv->ni_xram_base + (RX_BASE_ADDR * 8);
	priv->rx_xram_end_adr	= priv->ni_xram_base + ((RX_TOP_ADDR + 1) * 8);
	priv->rx_xram_start	= RX_BASE_ADDR;
	priv->rx_xram_end	= RX_TOP_ADDR;
	priv->tx_xram_base_adr	= priv->ni_xram_base + (TX_BASE_ADDR * 8);
	priv->tx_xram_end_adr	= priv->ni_xram_base + ((TX_TOP_ADDR + 1) * 8);
	priv->tx_xram_start	= TX_BASE_ADDR;
	priv->tx_xram_end	= TX_TOP_ADDR;

	curr_dev = dev;
	debug("%s: rx_base_addr:%x\t rx_top_addr %x\n",
	      __func__, priv->rx_xram_start, priv->rx_xram_end);
	debug("%s: tx_base_addr:%x\t tx_top_addr %x\n",
	      __func__, priv->tx_xram_start, priv->tx_xram_end);
	debug("%s: rx physical start address = %x end address = %x\n",
	      __func__, priv->rx_xram_base_adr, priv->rx_xram_end_adr);
	debug("%s: tx physical start address = %x end address = %x\n",
	      __func__, priv->tx_xram_base_adr, priv->tx_xram_end_adr);

	/* MDIO register */
	ret = ca_mdio_register(dev);
	if (ret)
		return ret;

	/* set MDIO pre-scale value */
	ca_reg_read(&reg_value, (u64)priv->per_mdio_base_addr,
		    PER_MDIO_CFG_OFFSET);
	reg_value = reg_value | 0x00280000;
	ca_reg_write(&reg_value, (u64)priv->per_mdio_base_addr,
		     PER_MDIO_CFG_OFFSET);

	ret = ca_phy_probe(dev);
	if (ret)
		return ret;

	priv->phydev->addr = priv->port_map[priv->active_port].phy_addr;

	ca_ni_led(priv->active_port, CA_LED_ON);

	ca_ni_reset();

	printf("CA NI %s: active_port=%d, phy_addr=%d\n",
	       __func__, priv->active_port, priv->phydev->addr);
	printf("CA NI %s: phy_id=0x%x, phy_id & PHY_ID_MASK=0x%x\n", __func__,
	       priv->phydev->phy_id, priv->phydev->phy_id & 0xFFFFFFF0);

	/* parse ethaddr and program it into the NI registers */
	ca_ni_setup_mac_addr();

#ifdef MIIPHY_REGISTER
	/* ca_miiphy_read and ca_miiphy_write
	 * must match the prototype expected by miiphy_register
	 */
	miiphy_register(dev->name, ca_miiphy_read, ca_miiphy_write);
#endif

	if (priv->init_rgmii) {
		/* hardware settings for RGMII port */
		ca_rgmii_init(priv);
	}

	if (priv->gphy_num > 0) {
		/* do internal gphy calibration */
		ca_internal_gphy_cal(priv);
	}
	return 0;
}

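/*
 * Device-tree parsing. The node provides three register regions (global,
 * PER_MDIO and NI/HV), the port map, optional internal-GPHY calibration
 * values and the XRAM base. A minimal node might look like the sketch
 * below; the property values are illustrative only and not taken from any
 * real board file:
 *
 *	ethernet@f4320000 {
 *		compatible = "eth_cortina";
 *		reg = <0xf4320000 0x100>,
 *		      <0xf4338000 0x200>,
 *		      <0xf4400000 0x4000>;
 *		valid-port-map = <0x18>;
 *		valid-port-num = <2>;
 *		valid-ports = <3 3>, <5 4>;
 *		def-active-port = <4>;
 *		init-rgmii = <1>;
 *		ni-xram-base = <0x10000>;
 *	};
 */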
static int ca_ni_of_to_plat(struct udevice *dev)
{
	int i, ret;
	struct cortina_ni_priv *priv = dev_get_priv(dev);

	memset(priv, 0, sizeof(struct cortina_ni_priv));
	priv->glb_base_addr = dev_remap_addr_index(dev, 0);
	if (!priv->glb_base_addr)
		return -ENOENT;
	printf("CA NI %s: priv->glb_base_addr for index 0 is 0x%p\n",
	       __func__, priv->glb_base_addr);

	priv->per_mdio_base_addr = dev_remap_addr_index(dev, 1);
	if (!priv->per_mdio_base_addr)
		return -ENOENT;
	printf("CA NI %s: priv->per_mdio_base_addr for index 1 is 0x%p\n",
	       __func__, priv->per_mdio_base_addr);

	priv->ni_hv_base_addr = dev_remap_addr_index(dev, 2);
	if (!priv->ni_hv_base_addr)
		return -ENOENT;
	printf("CA NI %s: priv->ni_hv_base_addr for index 2 is 0x%p\n",
	       __func__, priv->ni_hv_base_addr);

	priv->valid_port_map = dev_read_u32_default(dev, "valid-port-map", 1);
	priv->valid_port_num = dev_read_u32_default(dev, "valid-port-num", 1);

	for (i = 0; i < priv->valid_port_num; i++) {
		ret = dev_read_u32_index(dev, "valid-ports", i * 2,
					 &priv->port_map[i].phy_addr);
		ret = dev_read_u32_index(dev, "valid-ports", (i * 2) + 1,
					 &priv->port_map[i].port);
	}

	priv->gphy_num = dev_read_u32_default(dev, "inter-gphy-num", 1);
	for (i = 0; i < priv->gphy_num; i++) {
		ret = dev_read_u32_index(dev, "inter-gphy-val", i * 2,
					 &priv->gphy_values[i].reg_off);
		ret = dev_read_u32_index(dev, "inter-gphy-val", (i * 2) + 1,
					 &priv->gphy_values[i].value);
	}

	priv->active_port = dev_read_u32_default(dev, "def-active-port", 1);
	priv->init_rgmii = dev_read_u32_default(dev, "init-rgmii", 1);
	priv->ni_xram_base = dev_read_u32_default(dev, "ni-xram-base", 1);
	return 0;
}

static const struct eth_ops cortina_eth_ops = {
	.start = cortina_eth_start,
	.send = cortina_eth_send,
	.recv = cortina_eth_recv,
	.stop = cortina_eth_stop,
};

static const struct udevice_id cortina_eth_ids[] = {
	{ .compatible = "eth_cortina" },
	{ }
};

U_BOOT_DRIVER(eth_cortina) = {
	.name = "eth_cortina",
	.id = UCLASS_ETH,
	.of_match = cortina_eth_ids,
	.probe = cortina_eth_probe,
	.ops = &cortina_eth_ops,
	.priv_auto = sizeof(struct cortina_ni_priv),
	.plat_auto = sizeof(struct eth_pdata),
	.of_to_plat = ca_ni_of_to_plat,
};
