// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */

#include <linux/bitfield.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/platform_device.h>

#include "prestera_dsa.h"
#include "prestera.h"
#include "prestera_hw.h"
#include "prestera_rxtx.h"
#include "prestera_devlink.h"

#define PRESTERA_SDMA_WAIT_MUL		10

struct prestera_sdma_desc {
	__le32 word1;
	__le32 word2;
	__le32 buff;
	__le32 next;
} __packed __aligned(16);

#define PRESTERA_SDMA_BUFF_SIZE_MAX	1544

#define PRESTERA_SDMA_RX_DESC_PKT_LEN(desc) \
	((le32_to_cpu((desc)->word2) >> 16) & GENMASK(13, 0))

#define PRESTERA_SDMA_RX_DESC_OWNER(desc) \
	((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)

#define PRESTERA_SDMA_RX_DESC_IS_RCVD(desc) \
	(PRESTERA_SDMA_RX_DESC_OWNER(desc) == PRESTERA_SDMA_RX_DESC_CPU_OWN)

#define PRESTERA_SDMA_RX_DESC_CPU_OWN	0
#define PRESTERA_SDMA_RX_DESC_DMA_OWN	1

#define PRESTERA_SDMA_RX_QUEUE_NUM	8

#define PRESTERA_SDMA_RX_DESC_PER_Q	1000

#define PRESTERA_SDMA_TX_DESC_PER_Q	1000
#define PRESTERA_SDMA_TX_MAX_BURST	64

#define PRESTERA_SDMA_TX_DESC_OWNER(desc) \
	((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)

#define PRESTERA_SDMA_TX_DESC_CPU_OWN	0
#define PRESTERA_SDMA_TX_DESC_DMA_OWN	1U

#define PRESTERA_SDMA_TX_DESC_IS_SENT(desc) \
	(PRESTERA_SDMA_TX_DESC_OWNER(desc) == PRESTERA_SDMA_TX_DESC_CPU_OWN)

#define PRESTERA_SDMA_TX_DESC_LAST	BIT(20)
#define PRESTERA_SDMA_TX_DESC_FIRST	BIT(21)
#define PRESTERA_SDMA_TX_DESC_CALC_CRC	BIT(12)

#define PRESTERA_SDMA_TX_DESC_SINGLE	\
	(PRESTERA_SDMA_TX_DESC_FIRST | PRESTERA_SDMA_TX_DESC_LAST)

#define PRESTERA_SDMA_TX_DESC_INIT	\
	(PRESTERA_SDMA_TX_DESC_SINGLE | PRESTERA_SDMA_TX_DESC_CALC_CRC)

#define PRESTERA_SDMA_RX_INTR_MASK_REG		0x2814
#define PRESTERA_SDMA_RX_QUEUE_STATUS_REG	0x2680
#define PRESTERA_SDMA_RX_QUEUE_DESC_REG(n)	(0x260C + (n) * 16)

#define PRESTERA_SDMA_TX_QUEUE_DESC_REG		0x26C0
#define PRESTERA_SDMA_TX_QUEUE_START_REG	0x2868

struct prestera_sdma_buf {
	struct prestera_sdma_desc *desc;
	dma_addr_t desc_dma;
	struct sk_buff *skb;
	dma_addr_t buf_dma;
	bool is_used;
};

struct prestera_rx_ring {
	struct prestera_sdma_buf *bufs;
	int next_rx;
};

struct prestera_tx_ring {
	struct prestera_sdma_buf *bufs;
	int next_tx;
	int max_burst;
	int burst;
};

struct prestera_sdma {
	struct prestera_rx_ring rx_ring[PRESTERA_SDMA_RX_QUEUE_NUM];
	struct prestera_tx_ring tx_ring;
	struct prestera_switch *sw;
	struct dma_pool *desc_pool;
	struct work_struct tx_work;
	struct napi_struct rx_napi;
	struct net_device napi_dev;
	u32 map_addr;
	u64 dma_mask;
	/* protect SDMA from concurrent access by multiple CPUs */
	spinlock_t tx_lock;
};

struct prestera_rxtx {
	struct prestera_sdma sdma;
};

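/* Allocate a hardware descriptor for @buf from the shared DMA pool and
 * record its bus address; the data buffer itself is attached later.
 */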
static int prestera_sdma_buf_init(struct prestera_sdma *sdma,
				  struct prestera_sdma_buf *buf)
{
	struct prestera_sdma_desc *desc;
	dma_addr_t dma;

	desc = dma_pool_alloc(sdma->desc_pool, GFP_DMA | GFP_KERNEL, &dma);
	if (!desc)
		return -ENOMEM;

	buf->buf_dma = DMA_MAPPING_ERROR;
	buf->desc_dma = dma;
	buf->desc = desc;
	buf->skb = NULL;

	return 0;
}

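/* Translate a host DMA address into the address space used by the SDMA
 * engine; map_addr is the base offset obtained from prestera_hw_rxtx_init().
 */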
static u32 prestera_sdma_map(struct prestera_sdma *sdma, dma_addr_t pa)
{
	return sdma->map_addr + pa;
}

static void prestera_sdma_rx_desc_init(struct prestera_sdma *sdma,
				       struct prestera_sdma_desc *desc,
				       dma_addr_t buf)
{
	u32 word = le32_to_cpu(desc->word2);

	u32p_replace_bits(&word, PRESTERA_SDMA_BUFF_SIZE_MAX, GENMASK(15, 0));
	desc->word2 = cpu_to_le32(word);

	desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));

	/* make sure the buffer is set before resetting the descriptor */
	wmb();

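	/* word1 reset value: bit 31 hands the descriptor back to the SDMA
	 * engine (DMA ownership); the remaining set bits are assumed to be
	 * the hardware-defined initial control flags.
	 */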
	desc->word1 = cpu_to_le32(0xA0000000);
}

static void prestera_sdma_rx_desc_set_next(struct prestera_sdma *sdma,
					   struct prestera_sdma_desc *desc,
					   dma_addr_t next)
{
	desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
}

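/* Allocate a fresh receive skb and DMA-map it for the device, replacing any
 * buffer previously attached to @buf.
 */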
static int prestera_sdma_rx_skb_alloc(struct prestera_sdma *sdma,
				      struct prestera_sdma_buf *buf)
{
	struct device *dev = sdma->sw->dev->dev;
	struct sk_buff *skb;
	dma_addr_t dma;

	skb = alloc_skb(PRESTERA_SDMA_BUFF_SIZE_MAX, GFP_DMA | GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	/* map the full buffer size: the RX descriptor advertises
	 * PRESTERA_SDMA_BUFF_SIZE_MAX bytes to the hardware
	 */
	dma = dma_map_single(dev, skb->data, PRESTERA_SDMA_BUFF_SIZE_MAX,
			     DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto err_dma_map;

	if (buf->skb)
		dma_unmap_single(dev, buf->buf_dma,
				 PRESTERA_SDMA_BUFF_SIZE_MAX,
				 DMA_FROM_DEVICE);

	buf->buf_dma = dma;
	buf->skb = skb;

	return 0;

err_dma_map:
	kfree_skb(skb);

	return -ENOMEM;
}

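/* Detach the received skb from @buf and re-arm the descriptor with a new
 * buffer. If allocating a replacement fails, keep the old buffer attached
 * and return a copy of the received data instead.
 */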
static struct sk_buff *prestera_sdma_rx_skb_get(struct prestera_sdma *sdma,
						struct prestera_sdma_buf *buf)
{
	dma_addr_t buf_dma = buf->buf_dma;
	struct sk_buff *skb = buf->skb;
	u32 len = skb->len;
	int err;

	err = prestera_sdma_rx_skb_alloc(sdma, buf);
	if (err) {
		buf->buf_dma = buf_dma;
		buf->skb = skb;

		skb = alloc_skb(skb->len, GFP_ATOMIC);
		if (skb) {
			skb_put(skb, len);
			skb_copy_from_linear_data(buf->skb, skb->data, len);
		}
	}

	prestera_sdma_rx_desc_init(sdma, buf->desc, buf->buf_dma);

	return skb;
}

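/* Parse the DSA tag of a received frame, resolve the source port, strip the
 * tag, restore the Ethernet header, attach VLAN info if the frame was tagged
 * and report the packet to the devlink trap infrastructure.
 */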
static int prestera_rxtx_process_skb(struct prestera_sdma *sdma,
				     struct sk_buff *skb)
{
	struct prestera_port *port;
	struct prestera_dsa dsa;
	u32 hw_port, dev_id;
	u8 cpu_code;
	int err;

	skb_pull(skb, ETH_HLEN);

	/* ethertype field is part of the dsa header */
	err = prestera_dsa_parse(&dsa, skb->data - ETH_TLEN);
	if (err)
		return err;

	dev_id = dsa.hw_dev_num;
	hw_port = dsa.port_num;

	port = prestera_port_find_by_hwid(sdma->sw, dev_id, hw_port);
	if (unlikely(!port)) {
		dev_warn_ratelimited(prestera_dev(sdma->sw), "received pkt for non-existent port(%u, %u)\n",
				     dev_id, hw_port);
		return -ENOENT;
	}

	if (unlikely(!pskb_may_pull(skb, PRESTERA_DSA_HLEN)))
		return -EINVAL;

	/* remove DSA tag and update checksum */
	skb_pull_rcsum(skb, PRESTERA_DSA_HLEN);

	memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - PRESTERA_DSA_HLEN,
		ETH_ALEN * 2);

	skb_push(skb, ETH_HLEN);

	skb->protocol = eth_type_trans(skb, port->dev);

	if (dsa.vlan.is_tagged) {
		u16 tci = dsa.vlan.vid & VLAN_VID_MASK;

		tci |= dsa.vlan.vpt << VLAN_PRIO_SHIFT;
		if (dsa.vlan.cfi_bit)
			tci |= VLAN_CFI_MASK;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
	}

	cpu_code = dsa.cpu_code;
	prestera_devlink_trap_report(port, skb, cpu_code);

	return 0;
}

static int prestera_sdma_next_rx_buf_idx(int buf_idx)
{
	return (buf_idx + 1) % PRESTERA_SDMA_RX_DESC_PER_Q;
}

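/* NAPI poll: round-robin over all RX queues, harvesting completed descriptors
 * until the budget is spent or every queue is drained, then re-enable RX
 * interrupts once polling is complete.
 */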
static int prestera_sdma_rx_poll(struct napi_struct *napi, int budget)
{
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	unsigned int rxq_done_map = 0;
	struct prestera_sdma *sdma;
	struct list_head rx_list;
	unsigned int qmask;
	int pkts_done = 0;
	int q;

	qmask = GENMASK(qnum - 1, 0);

	INIT_LIST_HEAD(&rx_list);

	sdma = container_of(napi, struct prestera_sdma, rx_napi);

	while (pkts_done < budget && rxq_done_map != qmask) {
		for (q = 0; q < qnum && pkts_done < budget; q++) {
			struct prestera_rx_ring *ring = &sdma->rx_ring[q];
			struct prestera_sdma_desc *desc;
			struct prestera_sdma_buf *buf;
			int buf_idx = ring->next_rx;
			struct sk_buff *skb;

			buf = &ring->bufs[buf_idx];
			desc = buf->desc;

			if (PRESTERA_SDMA_RX_DESC_IS_RCVD(desc)) {
				rxq_done_map &= ~BIT(q);
			} else {
				rxq_done_map |= BIT(q);
				continue;
			}

			pkts_done++;

			__skb_trim(buf->skb, PRESTERA_SDMA_RX_DESC_PKT_LEN(desc));

			skb = prestera_sdma_rx_skb_get(sdma, buf);
			if (!skb)
				goto rx_next_buf;

			if (unlikely(prestera_rxtx_process_skb(sdma, skb)))
				goto rx_next_buf;

			list_add_tail(&skb->list, &rx_list);
rx_next_buf:
			ring->next_rx = prestera_sdma_next_rx_buf_idx(buf_idx);
		}
	}

	if (pkts_done < budget && napi_complete_done(napi, pkts_done))
		prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG,
			       GENMASK(9, 2));

	netif_receive_skb_list(&rx_list);

	return pkts_done;
}

static void prestera_sdma_rx_fini(struct prestera_sdma *sdma)
{
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	int q, b;

	/* disable all rx queues */
	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(15, 8));

	for (q = 0; q < qnum; q++) {
		struct prestera_rx_ring *ring = &sdma->rx_ring[q];

		if (!ring->bufs)
			break;

		for (b = 0; b < PRESTERA_SDMA_RX_DESC_PER_Q; b++) {
			struct prestera_sdma_buf *buf = &ring->bufs[b];

			if (buf->desc_dma)
				dma_pool_free(sdma->desc_pool, buf->desc,
					      buf->desc_dma);

			if (!buf->skb)
				continue;

			if (buf->buf_dma != DMA_MAPPING_ERROR)
				dma_unmap_single(sdma->sw->dev->dev,
						 buf->buf_dma,
						 PRESTERA_SDMA_BUFF_SIZE_MAX,
						 DMA_FROM_DEVICE);
			kfree_skb(buf->skb);
		}
	}
}

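/* Build a circular descriptor ring with pre-allocated, DMA-mapped receive
 * buffers for every RX queue, point the hardware at the ring heads and
 * enable the RX queues.
 */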
static int prestera_sdma_rx_init(struct prestera_sdma *sdma)
{
	int bnum = PRESTERA_SDMA_RX_DESC_PER_Q;
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	int err;
	int q;

	/* disable all rx queues */
	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(15, 8));

	for (q = 0; q < qnum; q++) {
		struct prestera_sdma_buf *head, *tail, *next, *prev;
		struct prestera_rx_ring *ring = &sdma->rx_ring[q];

		ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
		if (!ring->bufs)
			return -ENOMEM;

		ring->next_rx = 0;

		tail = &ring->bufs[bnum - 1];
		head = &ring->bufs[0];
		next = head;
		prev = next;

		do {
			err = prestera_sdma_buf_init(sdma, next);
			if (err)
				return err;

			err = prestera_sdma_rx_skb_alloc(sdma, next);
			if (err)
				return err;

			prestera_sdma_rx_desc_init(sdma, next->desc,
						   next->buf_dma);

			prestera_sdma_rx_desc_set_next(sdma, prev->desc,
						       next->desc_dma);

			prev = next;
			next++;
		} while (prev != tail);

		/* join tail with head to make a circular list */
		prestera_sdma_rx_desc_set_next(sdma, tail->desc, head->desc_dma);

		prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_DESC_REG(q),
			       prestera_sdma_map(sdma, head->desc_dma));
	}

	/* make sure all rx descs are filled before enabling all rx queues */
	wmb();

	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(7, 0));

	return 0;
}

static void prestera_sdma_tx_desc_init(struct prestera_sdma *sdma,
				       struct prestera_sdma_desc *desc)
{
	desc->word1 = cpu_to_le32(PRESTERA_SDMA_TX_DESC_INIT);
	desc->word2 = 0;
}

static void prestera_sdma_tx_desc_set_next(struct prestera_sdma *sdma,
					   struct prestera_sdma_desc *desc,
					   dma_addr_t next)
{
	desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
}

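/* Attach a mapped TX buffer to the descriptor. The programmed byte count
 * includes room for the FCS, which the hardware is expected to append
 * (see PRESTERA_SDMA_TX_DESC_CALC_CRC).
 */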
static void prestera_sdma_tx_desc_set_buf(struct prestera_sdma *sdma,
					  struct prestera_sdma_desc *desc,
					  dma_addr_t buf, size_t len)
{
	u32 word = le32_to_cpu(desc->word2);

	u32p_replace_bits(&word, len + ETH_FCS_LEN, GENMASK(30, 16));

	desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));
	desc->word2 = cpu_to_le32(word);
}

static void prestera_sdma_tx_desc_xmit(struct prestera_sdma_desc *desc)
{
	u32 word = le32_to_cpu(desc->word1);

	word |= PRESTERA_SDMA_TX_DESC_DMA_OWN << 31;

	/* make sure everything is written before enabling xmit */
	wmb();

	desc->word1 = cpu_to_le32(word);
}

static int prestera_sdma_tx_buf_map(struct prestera_sdma *sdma,
				    struct prestera_sdma_buf *buf,
				    struct sk_buff *skb)
{
	struct device *dma_dev = sdma->sw->dev->dev;
	dma_addr_t dma;

	dma = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, dma))
		return -ENOMEM;

	buf->buf_dma = dma;
	buf->skb = skb;

	return 0;
}

static void prestera_sdma_tx_buf_unmap(struct prestera_sdma *sdma,
				       struct prestera_sdma_buf *buf)
{
	struct device *dma_dev = sdma->sw->dev->dev;

	dma_unmap_single(dma_dev, buf->buf_dma, buf->skb->len, DMA_TO_DEVICE);
}

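/* Deferred TX cleanup: unmap and free buffers whose descriptors the SDMA
 * engine has handed back to the CPU.
 */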
static void prestera_sdma_tx_recycle_work_fn(struct work_struct *work)
{
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	struct prestera_tx_ring *tx_ring;
	struct prestera_sdma *sdma;
	int b;

	sdma = container_of(work, struct prestera_sdma, tx_work);

	tx_ring = &sdma->tx_ring;

	for (b = 0; b < bnum; b++) {
		struct prestera_sdma_buf *buf = &tx_ring->bufs[b];

		if (!buf->is_used)
			continue;

		if (!PRESTERA_SDMA_TX_DESC_IS_SENT(buf->desc))
			continue;

		prestera_sdma_tx_buf_unmap(sdma, buf);
		dev_consume_skb_any(buf->skb);
		buf->skb = NULL;

		/* make sure everything is cleaned up */
		wmb();

		buf->is_used = false;
	}
}

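/* Build the single circular TX descriptor ring and point the hardware at its
 * head; data buffers are mapped on demand at xmit time.
 */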
static int prestera_sdma_tx_init(struct prestera_sdma *sdma)
{
	struct prestera_sdma_buf *head, *tail, *next, *prev;
	struct prestera_tx_ring *tx_ring = &sdma->tx_ring;
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	int err;

	INIT_WORK(&sdma->tx_work, prestera_sdma_tx_recycle_work_fn);
	spin_lock_init(&sdma->tx_lock);

	tx_ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
	if (!tx_ring->bufs)
		return -ENOMEM;

	tail = &tx_ring->bufs[bnum - 1];
	head = &tx_ring->bufs[0];
	next = head;
	prev = next;

	tx_ring->max_burst = PRESTERA_SDMA_TX_MAX_BURST;
	tx_ring->burst = tx_ring->max_burst;
	tx_ring->next_tx = 0;

	do {
		err = prestera_sdma_buf_init(sdma, next);
		if (err)
			return err;

		next->is_used = false;

		prestera_sdma_tx_desc_init(sdma, next->desc);

		prestera_sdma_tx_desc_set_next(sdma, prev->desc,
					       next->desc_dma);

		prev = next;
		next++;
	} while (prev != tail);

	/* join tail with head to make a circular list */
	prestera_sdma_tx_desc_set_next(sdma, tail->desc, head->desc_dma);

	/* make sure descriptors are written */
	wmb();

	prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_DESC_REG,
		       prestera_sdma_map(sdma, head->desc_dma));

	return 0;
}

static void prestera_sdma_tx_fini(struct prestera_sdma *sdma)
{
	struct prestera_tx_ring *ring = &sdma->tx_ring;
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	int b;

	cancel_work_sync(&sdma->tx_work);

	if (!ring->bufs)
		return;

	for (b = 0; b < bnum; b++) {
		struct prestera_sdma_buf *buf = &ring->bufs[b];

		if (buf->desc)
			dma_pool_free(sdma->desc_pool, buf->desc,
				      buf->desc_dma);

		if (!buf->skb)
			continue;

		dma_unmap_single(sdma->sw->dev->dev, buf->buf_dma,
				 buf->skb->len, DMA_TO_DEVICE);

		dev_consume_skb_any(buf->skb);
	}
}

static void prestera_rxtx_handle_event(struct prestera_switch *sw,
				       struct prestera_event *evt,
				       void *arg)
{
	struct prestera_sdma *sdma = arg;

	if (evt->id != PRESTERA_RXTX_EVENT_RCV_PKT)
		return;

	prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG, 0);
	napi_schedule(&sdma->rx_napi);
}

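/* One-time SDMA setup: initialize RX/TX over SDMA via prestera_hw_rxtx_init(),
 * create the descriptor pool, build the RX and TX rings, register the RX
 * event handler and enable NAPI.
 */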
static int prestera_sdma_switch_init(struct prestera_switch *sw)
{
	struct prestera_sdma *sdma = &sw->rxtx->sdma;
	struct device *dev = sw->dev->dev;
	struct prestera_rxtx_params p;
	int err;

	p.use_sdma = true;

	err = prestera_hw_rxtx_init(sw, &p);
	if (err) {
		dev_err(dev, "failed to init rxtx by hw\n");
		return err;
	}

	sdma->dma_mask = dma_get_mask(dev);
	sdma->map_addr = p.map_addr;
	sdma->sw = sw;

	sdma->desc_pool = dma_pool_create("desc_pool", dev,
					  sizeof(struct prestera_sdma_desc),
					  16, 0);
	if (!sdma->desc_pool)
		return -ENOMEM;

	err = prestera_sdma_rx_init(sdma);
	if (err) {
		dev_err(dev, "failed to init rx ring\n");
		goto err_rx_init;
	}

	err = prestera_sdma_tx_init(sdma);
	if (err) {
		dev_err(dev, "failed to init tx ring\n");
		goto err_tx_init;
	}

	err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_RXTX,
						 prestera_rxtx_handle_event,
						 sdma);
	if (err)
		goto err_evt_register;

	init_dummy_netdev(&sdma->napi_dev);

	netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll);
	napi_enable(&sdma->rx_napi);

	return 0;

err_evt_register:
err_tx_init:
	prestera_sdma_tx_fini(sdma);
err_rx_init:
	prestera_sdma_rx_fini(sdma);

	dma_pool_destroy(sdma->desc_pool);
	return err;
}

static void prestera_sdma_switch_fini(struct prestera_switch *sw)
{
	struct prestera_sdma *sdma = &sw->rxtx->sdma;

	napi_disable(&sdma->rx_napi);
	netif_napi_del(&sdma->rx_napi);
	prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX,
					     prestera_rxtx_handle_event);
	prestera_sdma_tx_fini(sdma);
	prestera_sdma_rx_fini(sdma);
	dma_pool_destroy(sdma->desc_pool);
}

static bool prestera_sdma_is_ready(struct prestera_sdma *sdma)
{
	return !(prestera_read(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG) & 1);
}

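/* Busy-wait (bounded by PRESTERA_SDMA_WAIT_MUL * max_burst iterations) until
 * the SDMA TX engine reports idle, so another burst of descriptors can be
 * queued.
 */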
static int prestera_sdma_tx_wait(struct prestera_sdma *sdma,
				 struct prestera_tx_ring *tx_ring)
{
	int tx_wait_num = PRESTERA_SDMA_WAIT_MUL * tx_ring->max_burst;

	do {
		if (prestera_sdma_is_ready(sdma))
			return 0;

		udelay(1);
	} while (--tx_wait_num);

	return -EBUSY;
}

static void prestera_sdma_tx_start(struct prestera_sdma *sdma)
{
	prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG, 1);
	schedule_work(&sdma->tx_work);
}

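/* Queue one skb on the TX ring under tx_lock. Descriptors are handed to the
 * hardware in bursts; once a burst is exhausted, wait for the engine to go
 * idle before queueing more.
 */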
static netdev_tx_t prestera_sdma_xmit(struct prestera_sdma *sdma,
				      struct sk_buff *skb)
{
	struct device *dma_dev = sdma->sw->dev->dev;
	struct net_device *dev = skb->dev;
	struct prestera_tx_ring *tx_ring;
	struct prestera_sdma_buf *buf;
	int err;

	spin_lock(&sdma->tx_lock);

	tx_ring = &sdma->tx_ring;

	buf = &tx_ring->bufs[tx_ring->next_tx];
	if (buf->is_used) {
		schedule_work(&sdma->tx_work);
		goto drop_skb;
	}

	if (unlikely(eth_skb_pad(skb)))
		goto drop_skb_nofree;

	err = prestera_sdma_tx_buf_map(sdma, buf, skb);
	if (err)
		goto drop_skb;

	prestera_sdma_tx_desc_set_buf(sdma, buf->desc, buf->buf_dma, skb->len);

	dma_sync_single_for_device(dma_dev, buf->buf_dma, skb->len,
				   DMA_TO_DEVICE);

	if (tx_ring->burst) {
		tx_ring->burst--;
	} else {
		tx_ring->burst = tx_ring->max_burst;

		err = prestera_sdma_tx_wait(sdma, tx_ring);
		if (err)
			goto drop_skb_unmap;
	}

	tx_ring->next_tx = (tx_ring->next_tx + 1) % PRESTERA_SDMA_TX_DESC_PER_Q;
	prestera_sdma_tx_desc_xmit(buf->desc);
	buf->is_used = true;

	prestera_sdma_tx_start(sdma);

	goto tx_done;

drop_skb_unmap:
	prestera_sdma_tx_buf_unmap(sdma, buf);
drop_skb:
	dev_consume_skb_any(skb);
drop_skb_nofree:
	dev->stats.tx_dropped++;
tx_done:
	spin_unlock(&sdma->tx_lock);
	return NETDEV_TX_OK;
}

int prestera_rxtx_switch_init(struct prestera_switch *sw)
{
	struct prestera_rxtx *rxtx;
	int err;

	rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL);
	if (!rxtx)
		return -ENOMEM;

	sw->rxtx = rxtx;

	err = prestera_sdma_switch_init(sw);
	if (err)
		kfree(rxtx);

	return err;
}

void prestera_rxtx_switch_fini(struct prestera_switch *sw)
{
	prestera_sdma_switch_fini(sw);
	kfree(sw->rxtx);
}

int prestera_rxtx_port_init(struct prestera_port *port)
{
	port->dev->needed_headroom = PRESTERA_DSA_HLEN;
	return 0;
}

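/* Insert the DSA tag between the Ethernet addresses and the EtherType of the
 * outgoing frame, then hand the skb to the SDMA engine.
 */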
netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb)
{
	struct prestera_dsa dsa;

	dsa.hw_dev_num = port->dev_id;
	dsa.port_num = port->hw_id;

	if (skb_cow_head(skb, PRESTERA_DSA_HLEN) < 0)
		return NET_XMIT_DROP;

	skb_push(skb, PRESTERA_DSA_HLEN);
	memmove(skb->data, skb->data + PRESTERA_DSA_HLEN, 2 * ETH_ALEN);

	if (prestera_dsa_build(&dsa, skb->data + 2 * ETH_ALEN) != 0)
		return NET_XMIT_DROP;

	return prestera_sdma_xmit(&port->sw->rxtx->sdma, skb);
}