// SPDX-License-Identifier: GPL-2.0

/* Texas Instruments ICSSG SR1.0 Ethernet Driver
 *
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 * Copyright (c) Siemens AG, 2024
 *
 */

#include <linux/etherdevice.h>
#include <linux/genalloc.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/phy.h>
#include <linux/remoteproc/pruss.h>
#include <linux/pruss_driver.h>

#include "icssg_prueth.h"
#include "icssg_mii_rt.h"
#include "../k3-cppi-desc-pool.h"

#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG SR1.0 Ethernet driver"

/* SR1: Set buffer sizes for the pools. There are 8 internal queues
 * implemented in firmware, but only 4 tx channels/threads in the egress
 * direction to firmware. A high priority queue is needed for management
 * messages since they shouldn't be blocked even in high traffic
 * situations. So use Q0-Q2 as data queues and Q3 as the management queue
 * in the max case. However, for ease of configuration, use the max
 * data queue + 1 for management messages if we are not using the max
 * case.
 *
 * Allocate 4 MTU buffers per data queue. Firmware requires
 * pool sizes to be set for internal queues. Set the upper 5 queue
 * pool sizes to the min size of 128 bytes since there are only 3 tx
 * data channels and the management queue requires only a minimal buffer,
 * i.e. the lower queues are used by the driver and the highest priority
 * queue among them is used for management messages.
 */

static int emac_egress_buf_pool_size[] = {
	PRUETH_EMAC_BUF_POOL_SIZE_SR1, PRUETH_EMAC_BUF_POOL_SIZE_SR1,
	PRUETH_EMAC_BUF_POOL_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1,
	PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1,
	PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1
};

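/* Write the SR1.0 slice configuration (buffer pool base address and
 * per-queue sizes, RX flow ids, random seed) into ICSSG shared RAM and
 * initialize the link to the default 1G full duplex state.
 */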
static void icssg_config_sr1(struct prueth *prueth, struct prueth_emac *emac,
			     int slice)
{
	struct icssg_sr1_config config;
	void __iomem *va;
	int i, index;

	memset(&config, 0, sizeof(config));
	config.addr_lo = cpu_to_le32(lower_32_bits(prueth->msmcram.pa));
	config.addr_hi = cpu_to_le32(upper_32_bits(prueth->msmcram.pa));
	config.rx_flow_id = cpu_to_le32(emac->rx_flow_id_base); /* flow id for host port */
	config.rx_mgr_flow_id = cpu_to_le32(emac->rx_mgm_flow_id_base); /* for mgm ch */
	config.rand_seed = cpu_to_le32(get_random_u32());

	for (i = PRUETH_EMAC_BUF_POOL_START_SR1; i < PRUETH_NUM_BUF_POOLS_SR1; i++) {
		index = i - PRUETH_EMAC_BUF_POOL_START_SR1;
		config.tx_buf_sz[i] = cpu_to_le32(emac_egress_buf_pool_size[index]);
	}

	va = prueth->shram.va + slice * ICSSG_CONFIG_OFFSET_SLICE1;
	memcpy_toio(va, &config, sizeof(config));

	emac->speed = SPEED_1000;
	emac->duplex = DUPLEX_FULL;
}

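/* Send a single command word to the firmware over the highest priority
 * TX channel and wait up to 100 ms for the management response path to
 * signal completion. Only one command may be in flight at a time,
 * serialized by emac->cmd_lock.
 */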
static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd)
{
	struct cppi5_host_desc_t *first_desc;
	u32 pkt_len = sizeof(emac->cmd_data);
	__le32 *data = emac->cmd_data;
	dma_addr_t desc_dma, buf_dma;
	struct prueth_tx_chn *tx_chn;
	void **swdata;
	int ret = 0;
	u32 *epib;

	netdev_dbg(emac->ndev, "Sending cmd %x\n", cmd);

	/* only one command at a time allowed to firmware */
	mutex_lock(&emac->cmd_lock);
	data[0] = cpu_to_le32(cmd);

	/* highest priority channel for management messages */
	tx_chn = &emac->tx_chns[emac->tx_ch_num - 1];

	/* Map the linear buffer */
	buf_dma = dma_map_single(tx_chn->dma_dev, data, pkt_len, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
		netdev_err(emac->ndev, "cmd %x: failed to map cmd buffer\n", cmd);
		ret = -EINVAL;
		goto err_unlock;
	}

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		netdev_err(emac->ndev, "cmd %x: failed to allocate descriptor\n", cmd);
		dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
		ret = -ENOMEM;
		goto err_unlock;
	}

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	cppi5_hdesc_set_pkttype(first_desc, PRUETH_PKT_TYPE_CMD);
	epib = first_desc->epib;
	epib[0] = 0;
	epib[1] = 0;

	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	*swdata = data;

	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);

	/* send command */
	reinit_completion(&emac->cmd_complete);
	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	if (ret) {
		netdev_err(emac->ndev, "cmd %x: push failed: %d\n", cmd, ret);
		goto free_desc;
	}
	ret = wait_for_completion_timeout(&emac->cmd_complete, msecs_to_jiffies(100));
	if (!ret)
		netdev_err(emac->ndev, "cmd %x: completion timeout\n", cmd);

	mutex_unlock(&emac->cmd_lock);

	return ret;
free_desc:
	prueth_xmit_free(tx_chn, first_desc);
err_unlock:
	mutex_unlock(&emac->cmd_lock);

	return ret;
}

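/* Encode the current RGMII speed and duplex settings into a
 * speed/duplex command and send it to the firmware.
 */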
static void icssg_config_set_speed_sr1(struct prueth_emac *emac)
{
	u32 cmd = ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1, val;
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);

	val = icssg_rgmii_get_speed(prueth->miig_rt, slice);
	/* firmware expects speed settings in bits 2-1 */
	val <<= 1;
	cmd |= val;

	val = icssg_rgmii_get_fullduplex(prueth->miig_rt, slice);
	/* firmware expects the full duplex setting in bit 3 */
	val <<= 3;
	cmd |= val;

	emac_send_command_sr1(emac, cmd);
}

/* Called back by the PHY layer if there is a change in the link state
 * of the hw port
 */
static void emac_adjust_link_sr1(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	struct prueth *prueth = emac->prueth;
	bool new_state = false;
	unsigned long flags;

	if (phydev->link) {
		/* check the mode of operation - full/half duplex */
		if (phydev->duplex != emac->duplex) {
			new_state = true;
			emac->duplex = phydev->duplex;
		}
		if (phydev->speed != emac->speed) {
			new_state = true;
			emac->speed = phydev->speed;
		}
		if (!emac->link) {
			new_state = true;
			emac->link = 1;
		}
	} else if (emac->link) {
		new_state = true;
		emac->link = 0;

		/* f/w should support 100 & 1000 */
		emac->speed = SPEED_1000;

		/* half duplex may not be supported by f/w */
		emac->duplex = DUPLEX_FULL;
	}

	if (new_state) {
		phy_print_status(phydev);

		/* update RGMII and MII configuration based on PHY negotiated
		 * values
		 */
		if (emac->link) {
			/* Set the RGMII cfg for gig en and full duplex */
			icssg_update_rgmii_cfg(prueth->miig_rt, emac);

			/* update the Tx IPG based on 100M/1G speed */
			spin_lock_irqsave(&emac->lock, flags);
			icssg_config_ipg(emac);
			spin_unlock_irqrestore(&emac->lock, flags);
			icssg_config_set_speed_sr1(emac);
		}
	}

	if (emac->link) {
		/* reactivate the transmit queues */
		netif_tx_wake_all_queues(ndev);
	} else {
		netif_tx_stop_all_queues(ndev);
		prueth_cleanup_tx_ts(emac);
	}
}

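/* Connect to the PHY described in the device tree and trim the
 * advertised link modes to what the SR1.0 firmware supports.
 */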
static int emac_phy_connect(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	struct net_device *ndev = emac->ndev;
	/* connect PHY */
	ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
				      &emac_adjust_link_sr1, 0,
				      emac->phy_if);
	if (!ndev->phydev) {
		dev_err(prueth->dev, "couldn't connect to phy %s\n",
			emac->phy_node->full_name);
		return -ENODEV;
	}

	if (!emac->half_duplex) {
		dev_dbg(prueth->dev, "half duplex mode is not supported\n");
		phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	}

	/* Remove 100 Mbit/s half-duplex since RGMII may misreport the
	 * connection as full duplex
	 */
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);

	/* remove unsupported modes */
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);

	if (emac->phy_if == PHY_INTERFACE_MODE_MII)
		phy_set_max_speed(ndev->phydev, SPEED_100);

	return 0;
}

/* Get one packet from the requested flow_id
 *
 * Returns the skb pointer if a packet was found, else NULL.
 * The caller must free the returned skb.
 */
static struct sk_buff *prueth_process_rx_mgm(struct prueth_emac *emac,
					     u32 flow_id)
{
	struct prueth_rx_chn *rx_chn = &emac->rx_mgm_chn;
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	struct sk_buff *skb, *new_skb;
	dma_addr_t desc_dma, buf_dma;
	u32 buf_dma_len, pkt_len;
	void **swdata;
	int ret;

	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
	if (ret) {
		if (ret != -ENODATA)
			netdev_err(ndev, "rx mgm pop: failed: %d\n", ret);
		return NULL;
	}

	if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown */
		return NULL;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);

	/* Fix FW bug about incorrect PSDATA size */
	if (cppi5_hdesc_get_psdata_size(desc_rx) != PRUETH_NAV_PS_DATA_SIZE) {
		cppi5_hdesc_update_psdata_size(desc_rx,
					       PRUETH_NAV_PS_DATA_SIZE);
	}

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	skb = *swdata;
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);

	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
	/* if allocation fails we drop the packet but push the
	 * descriptor back to the ring with the old skb to prevent a stall
	 */
	if (!new_skb) {
		netdev_err(ndev,
			   "skb alloc failed, dropped mgm pkt from flow %d\n",
			   flow_id);
		new_skb = skb;
		skb = NULL;	/* return NULL */
	} else {
		/* return the filled skb */
		skb_put(skb, pkt_len);
	}

	/* queue another DMA */
	ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_mgm_chn);
	if (WARN_ON(ret < 0))
		dev_kfree_skb_any(new_skb);

	return skb;
}

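/* Deliver a TX timestamp response from the firmware to the socket that
 * requested it, using the cookie to look up the saved skb.
 */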
static void prueth_tx_ts_sr1(struct prueth_emac *emac,
			     struct emac_tx_ts_response_sr1 *tsr)
{
	struct skb_shared_hwtstamps ssh;
	u32 hi_ts, lo_ts, cookie;
	struct sk_buff *skb;
	u64 ns;

	hi_ts = le32_to_cpu(tsr->hi_ts);
	lo_ts = le32_to_cpu(tsr->lo_ts);

	ns = (u64)hi_ts << 32 | lo_ts;

	cookie = le32_to_cpu(tsr->cookie);
	if (cookie >= PRUETH_MAX_TX_TS_REQUESTS) {
		netdev_dbg(emac->ndev, "Invalid TX TS cookie 0x%x\n",
			   cookie);
		return;
	}

	skb = emac->tx_ts_skb[cookie];
	emac->tx_ts_skb[cookie] = NULL;	/* free slot */

	memset(&ssh, 0, sizeof(ssh));
	ssh.hwtstamp = ns_to_ktime(ns);

	skb_tstamp_tx(skb, &ssh);
	dev_consume_skb_any(skb);
}

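/* Threaded IRQ handler for the TX timestamp management flow */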
static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
{
	struct prueth_emac *emac = dev_id;
	struct sk_buff *skb;

	skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1);
	if (!skb)
		return IRQ_NONE;

	prueth_tx_ts_sr1(emac, (void *)skb->data);
	dev_kfree_skb_any(skb);

	return IRQ_HANDLED;
}

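/* Threaded IRQ handler for command responses from the firmware; wakes
 * up the waiter in emac_send_command_sr1() for recognized responses.
 */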
static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
{
	struct prueth_emac *emac = dev_id;
	struct sk_buff *skb;
	u32 rsp;

	skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1);
	if (!skb)
		return IRQ_NONE;

	/* Process command response */
	rsp = le32_to_cpu(*(__le32 *)skb->data) & 0xffff0000;
	if (rsp == ICSSG_SHUTDOWN_CMD_SR1) {
		netdev_dbg(emac->ndev, "f/w Shutdown cmd resp %x\n", rsp);
		complete(&emac->cmd_complete);
	} else if (rsp == ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1) {
		netdev_dbg(emac->ndev, "f/w Speed/Duplex cmd rsp %x\n", rsp);
		complete(&emac->cmd_complete);
	}

	dev_kfree_skb_any(skb);

	return IRQ_HANDLED;
}

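/* SR1.0 EMAC firmware images for the PRU and RTU cores, indexed by slice */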
static struct icssg_firmwares icssg_sr1_emac_firmwares[] = {
	{
		.pru = "ti-pruss/am65x-pru0-prueth-fw.elf",
		.rtu = "ti-pruss/am65x-rtu0-prueth-fw.elf",
	},
	{
		.pru = "ti-pruss/am65x-pru1-prueth-fw.elf",
		.rtu = "ti-pruss/am65x-rtu1-prueth-fw.elf",
	}
};

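/* Program the slice configuration and boot the PRU and RTU cores with
 * the SR1.0 EMAC firmware.
 */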
static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
{
	struct icssg_firmwares *firmwares;
	struct device *dev = prueth->dev;
	int slice, ret;

	firmwares = icssg_sr1_emac_firmwares;

	slice = prueth_emac_slice(emac);
	if (slice < 0) {
		netdev_err(emac->ndev, "invalid port\n");
		return -EINVAL;
	}

	icssg_config_sr1(prueth, emac, slice);

	ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
	ret = rproc_boot(prueth->pru[slice]);
	if (ret) {
		dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
		return -EINVAL;
	}

	ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
	ret = rproc_boot(prueth->rtu[slice]);
	if (ret) {
		dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
		goto halt_pru;
	}

	emac->fw_running = 1;
	return 0;

halt_pru:
	rproc_shutdown(prueth->pru[slice]);

	return ret;
}

/**
 * emac_ndo_open - EMAC device open
 * @ndev: network adapter device
 *
 * Called when system wants to start the interface.
 *
 * Return: 0 for a successful open, or appropriate error code
 */
static int emac_ndo_open(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int num_data_chn = emac->tx_ch_num - 1;
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	struct device *dev = prueth->dev;
	int max_rx_flows, rx_flow;
	int ret, i;

	/* clear SMEM and MSMC settings for all slices */
	if (!prueth->emacs_initialized) {
		memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
		memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
	}

	/* set h/w MAC as user might have re-configured */
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);

	icssg_class_default(prueth->miig_rt, slice, 0, true);

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
	if (ret) {
		dev_err(dev, "cannot set real number of tx queues\n");
		return ret;
	}

	init_completion(&emac->cmd_complete);
	ret = prueth_init_tx_chns(emac);
	if (ret) {
		dev_err(dev, "failed to init tx channel: %d\n", ret);
		return ret;
	}

	max_rx_flows = PRUETH_MAX_RX_FLOWS_SR1;
	ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
				  max_rx_flows, PRUETH_MAX_RX_DESC);
	if (ret) {
		dev_err(dev, "failed to init rx channel: %d\n", ret);
		goto cleanup_tx;
	}

	ret = prueth_init_rx_chns(emac, &emac->rx_mgm_chn, "rxmgm",
				  PRUETH_MAX_RX_MGM_FLOWS_SR1,
				  PRUETH_MAX_RX_MGM_DESC_SR1);
	if (ret) {
		dev_err(dev, "failed to init rx mgmt channel: %d\n",
			ret);
		goto cleanup_rx;
	}

	ret = prueth_ndev_add_tx_napi(emac);
	if (ret)
		goto cleanup_rx_mgm;

	/* we use only the highest priority flow for now i.e. @irq[3] */
	rx_flow = PRUETH_RX_FLOW_DATA_SR1;
	ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
			  IRQF_TRIGGER_HIGH, dev_name(dev), emac);
	if (ret) {
		dev_err(dev, "unable to request RX IRQ\n");
		goto cleanup_napi;
	}

	ret = request_threaded_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1],
				   NULL, prueth_rx_mgm_rsp_thread,
				   IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
				   dev_name(dev), emac);
	if (ret) {
		dev_err(dev, "unable to request RX Management RSP IRQ\n");
		goto free_rx_irq;
	}

	ret = request_threaded_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1],
				   NULL, prueth_rx_mgm_ts_thread_sr1,
				   IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
				   dev_name(dev), emac);
	if (ret) {
		dev_err(dev, "unable to request RX Management TS IRQ\n");
		goto free_rx_mgm_rsp_irq;
	}

	/* reset and start PRU firmware */
	ret = prueth_emac_start(prueth, emac);
	if (ret)
		goto free_rx_mgmt_ts_irq;

	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);

	/* Prepare RX */
	ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
	if (ret)
		goto stop;

	ret = prueth_prepare_rx_chan(emac, &emac->rx_mgm_chn, 64);
	if (ret)
		goto reset_rx_chn;

	ret = k3_udma_glue_enable_rx_chn(emac->rx_mgm_chn.rx_chn);
	if (ret)
		goto reset_rx_chn;

	ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
	if (ret)
		goto reset_rx_mgm_chn;

	for (i = 0; i < emac->tx_ch_num; i++) {
		ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
		if (ret)
			goto reset_tx_chan;
	}

	/* Enable NAPI in Tx and Rx direction */
	for (i = 0; i < emac->tx_ch_num; i++)
		napi_enable(&emac->tx_chns[i].napi_tx);
	napi_enable(&emac->napi_rx);

	/* start PHY */
	phy_start(ndev->phydev);

	prueth->emacs_initialized++;

	queue_work(system_long_wq, &emac->stats_work.work);

	return 0;

reset_tx_chan:
	/* Since the interface is not yet up, there wouldn't be
	 * any SKB for completion. So pass false for free_skb.
	 */
	prueth_reset_tx_chan(emac, i, false);
reset_rx_mgm_chn:
	prueth_reset_rx_chan(&emac->rx_mgm_chn,
			     PRUETH_MAX_RX_MGM_FLOWS_SR1, true);
reset_rx_chn:
	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
stop:
	prueth_emac_stop(emac);
free_rx_mgmt_ts_irq:
	free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1],
		 emac);
free_rx_mgm_rsp_irq:
	free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1],
		 emac);
free_rx_irq:
	free_irq(emac->rx_chns.irq[rx_flow], emac);
cleanup_napi:
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
cleanup_rx_mgm:
	prueth_cleanup_rx_chns(emac, &emac->rx_mgm_chn,
			       PRUETH_MAX_RX_MGM_FLOWS_SR1);
cleanup_rx:
	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
cleanup_tx:
	prueth_cleanup_tx_chns(emac);

	return ret;
}

/**
 * emac_ndo_stop - EMAC device stop
 * @ndev: network adapter device
 *
 * Called when system wants to stop or down the interface.
 *
 * Return: Always 0 (Success)
 */
static int emac_ndo_stop(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int rx_flow = PRUETH_RX_FLOW_DATA_SR1;
	struct prueth *prueth = emac->prueth;
	int max_rx_flows;
	int ret, i;

	/* inform the upper layers. */
	netif_tx_stop_all_queues(ndev);

	/* block packets from wire */
	if (ndev->phydev)
		phy_stop(ndev->phydev);

	icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));

	emac_send_command_sr1(emac, ICSSG_SHUTDOWN_CMD_SR1);

	atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
	/* ensure new tdown_cnt value is visible */
	smp_mb__after_atomic();
	/* tear down and disable UDMA channels */
	reinit_completion(&emac->tdown_complete);
	for (i = 0; i < emac->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);

	ret = wait_for_completion_timeout(&emac->tdown_complete,
					  msecs_to_jiffies(1000));
	if (!ret)
		netdev_err(ndev, "tx teardown timeout\n");

	prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
	for (i = 0; i < emac->tx_ch_num; i++)
		napi_disable(&emac->tx_chns[i].napi_tx);

	max_rx_flows = PRUETH_MAX_RX_FLOWS_SR1;
	k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);

	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
	/* Teardown RX MGM channel */
	k3_udma_glue_tdown_rx_chn(emac->rx_mgm_chn.rx_chn, true);
	prueth_reset_rx_chan(&emac->rx_mgm_chn,
			     PRUETH_MAX_RX_MGM_FLOWS_SR1, true);

	napi_disable(&emac->napi_rx);

	/* Destroying the queued work in ndo_stop() */
	cancel_delayed_work_sync(&emac->stats_work);

	/* stop PRUs */
	prueth_emac_stop(emac);

	free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1], emac);
	free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1], emac);
	free_irq(emac->rx_chns.irq[rx_flow], emac);
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
	prueth_cleanup_tx_chns(emac);

	prueth_cleanup_rx_chns(emac, &emac->rx_mgm_chn, PRUETH_MAX_RX_MGM_FLOWS_SR1);
	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);

	prueth->emacs_initialized--;

	return 0;
}

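/* Configure the classifier according to the interface RX mode:
 * promiscuous, all-multicast or the multicast address list.
 */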
static void emac_ndo_set_rx_mode_sr1(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	bool allmulti = ndev->flags & IFF_ALLMULTI;
	bool promisc = ndev->flags & IFF_PROMISC;
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);

	if (promisc) {
		icssg_class_promiscuous_sr1(prueth->miig_rt, slice);
		return;
	}

	if (allmulti) {
		icssg_class_default(prueth->miig_rt, slice, 1, true);
		return;
	}

	icssg_class_default(prueth->miig_rt, slice, 0, true);
	if (!netdev_mc_empty(ndev)) {
		/* program multicast address list into Classifier */
		icssg_class_add_mcast_sr1(prueth->miig_rt, slice, ndev);
	}
}

static const struct net_device_ops emac_netdev_ops = {
	.ndo_open = emac_ndo_open,
	.ndo_stop = emac_ndo_stop,
	.ndo_start_xmit = emac_ndo_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = emac_ndo_tx_timeout,
	.ndo_set_rx_mode = emac_ndo_set_rx_mode_sr1,
	.ndo_eth_ioctl = emac_ndo_ioctl,
	.ndo_get_stats64 = emac_ndo_get_stats64,
	.ndo_get_phys_port_name = emac_ndo_get_phys_port_name,
};

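/* Allocate and set up the net_device and per-port state for one ICSSG
 * port: command workqueue, DRAM region, PHY connection data, MAC address
 * and netdev ops/features.
 */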
static int prueth_netdev_init(struct prueth *prueth,
			      struct device_node *eth_node)
{
	struct prueth_emac *emac;
	struct net_device *ndev;
	enum prueth_port port;
	enum prueth_mac mac;
	/* Only enable one TX channel due to timeouts when
	 * using multiple channels
	 */
	int num_tx_chn = 1;
	int ret;

	port = prueth_node_port(eth_node);
	if (port == PRUETH_PORT_INVALID)
		return -EINVAL;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return -EINVAL;

	ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
	if (!ndev)
		return -ENOMEM;

	emac = netdev_priv(ndev);
	emac->is_sr1 = 1;
	emac->prueth = prueth;
	emac->ndev = ndev;
	emac->port_id = port;
	emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
	if (!emac->cmd_wq) {
		ret = -ENOMEM;
		goto free_ndev;
	}

	INIT_DELAYED_WORK(&emac->stats_work, emac_stats_work_handler);

	ret = pruss_request_mem_region(prueth->pruss,
				       port == PRUETH_PORT_MII0 ?
				       PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
				       &emac->dram);
	if (ret) {
		dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
		ret = -ENOMEM;
		goto free_wq;
	}

	/* SR1.0 uses a dedicated high priority channel
	 * to send commands to the firmware
	 */
	emac->tx_ch_num = 2;

	SET_NETDEV_DEV(ndev, prueth->dev);
	spin_lock_init(&emac->lock);
	mutex_init(&emac->cmd_lock);

	emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
	if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
		dev_err(prueth->dev, "couldn't find phy-handle\n");
		ret = -ENODEV;
		goto free;
	} else if (of_phy_is_fixed_link(eth_node)) {
		ret = of_phy_register_fixed_link(eth_node);
		if (ret) {
			ret = dev_err_probe(prueth->dev, ret,
					    "failed to register fixed-link phy\n");
			goto free;
		}

		emac->phy_node = eth_node;
	}

	ret = of_get_phy_mode(eth_node, &emac->phy_if);
	if (ret) {
		dev_err(prueth->dev, "could not get phy-mode property\n");
		goto free;
	}

	if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
	    !phy_interface_mode_is_rgmii(emac->phy_if)) {
		dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
		ret = -EINVAL;
		goto free;
	}

	/* AM65 SR1.0 has TX Internal delay always enabled by hardware
	 * and it is not possible to disable TX Internal delay. The below
	 * switch case block describes how we handle different phy modes
	 * based on this hardware restriction.
	 */
	switch (emac->phy_if) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
		ret = -EINVAL;
		goto free;
	default:
		break;
	}

	/* get mac address from DT and set private and netdev addr */
	ret = of_get_ethdev_address(eth_node, ndev);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		eth_hw_addr_random(ndev);
		dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
			 port, ndev->dev_addr);
	}
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
	ndev->max_mtu = PRUETH_MAX_MTU;
	ndev->netdev_ops = &emac_netdev_ops;
	ndev->ethtool_ops = &icssg_ethtool_ops;
	ndev->hw_features = NETIF_F_SG;
	ndev->features = ndev->hw_features;

	netif_napi_add(ndev, &emac->napi_rx, emac_napi_rx_poll);
	prueth->emac[mac] = emac;

	return 0;

free:
	pruss_release_mem_region(prueth->pruss, &emac->dram);
free_wq:
	destroy_workqueue(emac->cmd_wq);
free_ndev:
	emac->ndev = NULL;
	prueth->emac[mac] = NULL;
	free_netdev(ndev);

	return ret;
}

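/* Probe: parse the ethernet-ports DT nodes, acquire the PRUSS cores and
 * memories, allocate MSMC SRAM, then create and register the net_devices.
 */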
static int prueth_probe(struct platform_device *pdev)
{
	struct device_node *eth_node, *eth_ports_node;
	struct device_node *eth0_node = NULL;
	struct device_node *eth1_node = NULL;
	struct device *dev = &pdev->dev;
	struct device_node *np;
	struct prueth *prueth;
	struct pruss *pruss;
	u32 msmc_ram_size;
	int i, ret;

	np = dev->of_node;

	prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
	if (!prueth)
		return -ENOMEM;

	dev_set_drvdata(dev, prueth);
	prueth->pdev = pdev;
	prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);

	prueth->dev = dev;
	eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
	if (!eth_ports_node)
		return -ENOENT;

	for_each_child_of_node(eth_ports_node, eth_node) {
		u32 reg;

		if (strcmp(eth_node->name, "port"))
			continue;
		ret = of_property_read_u32(eth_node, "reg", &reg);
		if (ret < 0) {
			dev_err(dev, "%pOF error reading port_id %d\n",
				eth_node, ret);
		}

		of_node_get(eth_node);

		if (reg == 0) {
			eth0_node = eth_node;
			if (!of_device_is_available(eth0_node)) {
				of_node_put(eth0_node);
				eth0_node = NULL;
			}
		} else if (reg == 1) {
			eth1_node = eth_node;
			if (!of_device_is_available(eth1_node)) {
				of_node_put(eth1_node);
				eth1_node = NULL;
			}
		} else {
			dev_err(dev, "port reg should be 0 or 1\n");
		}
	}

	of_node_put(eth_ports_node);

	/* At least one node must be present and available else we fail */
	if (!eth0_node && !eth1_node) {
		dev_err(dev, "neither port0 nor port1 node available\n");
		return -ENODEV;
	}

	if (eth0_node == eth1_node) {
		dev_err(dev, "port0 and port1 can't have same reg\n");
		of_node_put(eth0_node);
		return -ENODEV;
	}

	prueth->eth_node[PRUETH_MAC0] = eth0_node;
	prueth->eth_node[PRUETH_MAC1] = eth1_node;

	prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
	if (IS_ERR(prueth->miig_rt)) {
		dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
		return -ENODEV;
	}

	prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
	if (IS_ERR(prueth->mii_rt)) {
		dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
		return -ENODEV;
	}

	if (eth0_node) {
		ret = prueth_get_cores(prueth, ICSS_SLICE0, true);
		if (ret)
			goto put_cores;
	}

	if (eth1_node) {
		ret = prueth_get_cores(prueth, ICSS_SLICE1, true);
		if (ret)
			goto put_cores;
	}

	pruss = pruss_get(eth0_node ?
			  prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
	if (IS_ERR(pruss)) {
		ret = PTR_ERR(pruss);
		dev_err(dev, "unable to get pruss handle\n");
		goto put_cores;
	}

	prueth->pruss = pruss;

	ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
				       &prueth->shram);
	if (ret) {
		dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
		goto put_pruss;
	}

	prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
	if (!prueth->sram_pool) {
		dev_err(dev, "unable to get SRAM pool\n");
		ret = -ENODEV;

		goto put_mem;
	}

	msmc_ram_size = MSMC_RAM_SIZE_SR1;

	prueth->msmcram.va = (void __iomem *)gen_pool_alloc(prueth->sram_pool,
							    msmc_ram_size);

	if (!prueth->msmcram.va) {
		ret = -ENOMEM;
		dev_err(dev, "unable to allocate MSMC resource\n");
		goto put_mem;
	}
	prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
						   (unsigned long)prueth->msmcram.va);
	prueth->msmcram.size = msmc_ram_size;
	memset_io(prueth->msmcram.va, 0, msmc_ram_size);
	dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
		prueth->msmcram.va, prueth->msmcram.size);

	if (eth0_node) {
		ret = prueth_netdev_init(prueth, eth0_node);
		if (ret) {
			dev_err_probe(dev, ret, "netdev init %s failed\n",
				      eth0_node->name);
			goto free_pool;
		}

		if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL))
			prueth->emac[PRUETH_MAC0]->half_duplex = 1;
	}

	if (eth1_node) {
		ret = prueth_netdev_init(prueth, eth1_node);
		if (ret) {
			dev_err_probe(dev, ret, "netdev init %s failed\n",
				      eth1_node->name);
			goto netdev_exit;
		}

		if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL))
			prueth->emac[PRUETH_MAC1]->half_duplex = 1;
	}

	/* register the network devices */
	if (eth0_node) {
		ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
		if (ret) {
			dev_err(dev, "can't register netdev for port MII0\n");
			goto netdev_exit;
		}

		prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
		emac_phy_connect(prueth->emac[PRUETH_MAC0]);
		phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
	}

	if (eth1_node) {
		ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
		if (ret) {
			dev_err(dev, "can't register netdev for port MII1\n");
			goto netdev_unregister;
		}

		prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
		emac_phy_connect(prueth->emac[PRUETH_MAC1]);
		phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
	}

	dev_info(dev, "TI PRU SR1.0 ethernet driver initialized: %s EMAC mode\n",
		 (!eth0_node || !eth1_node) ? "single" : "dual");

	if (eth1_node)
		of_node_put(eth1_node);
	if (eth0_node)
		of_node_put(eth0_node);

	return 0;

netdev_unregister:
	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		if (!prueth->registered_netdevs[i])
			continue;

		if (prueth->emac[i]->ndev->phydev) {
			phy_disconnect(prueth->emac[i]->ndev->phydev);
			prueth->emac[i]->ndev->phydev = NULL;
		}
		unregister_netdev(prueth->registered_netdevs[i]);
	}

netdev_exit:
	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		eth_node = prueth->eth_node[i];
		if (!eth_node)
			continue;

		prueth_netdev_exit(prueth, eth_node);
	}

free_pool:
	gen_pool_free(prueth->sram_pool,
		      (unsigned long)prueth->msmcram.va, msmc_ram_size);

put_mem:
	pruss_release_mem_region(prueth->pruss, &prueth->shram);

put_pruss:
	pruss_put(prueth->pruss);

put_cores:
	if (eth1_node) {
		prueth_put_cores(prueth, ICSS_SLICE1);
		of_node_put(eth1_node);
	}

	if (eth0_node) {
		prueth_put_cores(prueth, ICSS_SLICE0);
		of_node_put(eth0_node);
	}

	return ret;
}

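/* Remove: unregister the net_devices, tear down per-port state and
 * release the MSMC SRAM, PRUSS memories and cores.
 */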
static void prueth_remove(struct platform_device *pdev)
{
	struct prueth *prueth = platform_get_drvdata(pdev);
	struct device_node *eth_node;
	int i;

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		if (!prueth->registered_netdevs[i])
			continue;
		phy_stop(prueth->emac[i]->ndev->phydev);
		phy_disconnect(prueth->emac[i]->ndev->phydev);
		prueth->emac[i]->ndev->phydev = NULL;
		unregister_netdev(prueth->registered_netdevs[i]);
	}

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		eth_node = prueth->eth_node[i];
		if (!eth_node)
			continue;

		prueth_netdev_exit(prueth, eth_node);
	}

	gen_pool_free(prueth->sram_pool,
		      (unsigned long)prueth->msmcram.va,
		      MSMC_RAM_SIZE_SR1);

	pruss_release_mem_region(prueth->pruss, &prueth->shram);

	pruss_put(prueth->pruss);

	if (prueth->eth_node[PRUETH_MAC1])
		prueth_put_cores(prueth, ICSS_SLICE1);

	if (prueth->eth_node[PRUETH_MAC0])
		prueth_put_cores(prueth, ICSS_SLICE0);
}

static const struct prueth_pdata am654_sr1_icssg_pdata = {
	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
};

static const struct of_device_id prueth_dt_match[] = {
	{ .compatible = "ti,am654-sr1-icssg-prueth", .data = &am654_sr1_icssg_pdata },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, prueth_dt_match);

static struct platform_driver prueth_driver = {
	.probe = prueth_probe,
	.remove_new = prueth_remove,
	.driver = {
		.name = "icssg-prueth-sr1",
		.of_match_table = prueth_dt_match,
		.pm = &prueth_dev_pm_ops,
	},
};
module_platform_driver(prueth_driver);

MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
MODULE_AUTHOR("Diogo Ivo <diogo.ivo@siemens.com>");
MODULE_DESCRIPTION(PRUETH_MODULE_DESCRIPTION);
MODULE_LICENSE("GPL");