// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include <linux/dma-mapping.h>
#include <linux/ip.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <uapi/linux/udp.h>
#include "funeth.h"
#include "funeth_ktls.h"
#include "funeth_txrx.h"
#include "funeth_trace.h"
#include "fun_queue.h"

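/* If an XDP Tx queue has fewer than FUN_XDP_CLEAN_THRES free descriptors,
 * reclaim up to FUN_XDP_CLEAN_BATCH completed packets before enqueuing the
 * next frame.
 */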
#define FUN_XDP_CLEAN_THRES 32
#define FUN_XDP_CLEAN_BATCH 16

/* DMA-map a packet and return the (length, DMA_address) pairs for its
 * segments. If a mapping error occurs -ENOMEM is returned. The packet is
 * described by an skb_shared_info for its fragments plus one additional
 * address/length pair for its linear buffer.
 */
static int fun_map_pkt(struct device *dev, const struct skb_shared_info *si,
		       void *data, unsigned int data_len,
		       dma_addr_t *addr, unsigned int *len)
{
	const skb_frag_t *fp, *end;

	*len = data_len;
	*addr = dma_map_single(dev, data, *len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		return -ENOMEM;

	if (!si)
		return 0;

	for (fp = si->frags, end = fp + si->nr_frags; fp < end; fp++) {
		*++len = skb_frag_size(fp);
		*++addr = skb_frag_dma_map(dev, fp, 0, *len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], data_len, DMA_TO_DEVICE);
	return -ENOMEM;
}

/* Return the address just past the end of a Tx queue's descriptor ring.
 * It exploits the fact that the HW writeback area is just after the end
 * of the descriptor ring.
 */
static void *txq_end(const struct funeth_txq *q)
{
	return (void *)q->hw_wb;
}

/* Return the amount of space within a Tx ring from the given address to the
 * end.
 */
static unsigned int txq_to_end(const struct funeth_txq *q, void *p)
{
	return txq_end(q) - p;
}

/* Return the number of Tx descriptors occupied by a Tx request. */
static unsigned int tx_req_ndesc(const struct fun_eth_tx_req *req)
{
	return DIV_ROUND_UP(req->len8, FUNETH_SQE_SIZE / 8);
}

/* Write a gather list to the Tx descriptor at @req from @ngle address/length
 * pairs.
 */
static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q,
					  struct fun_eth_tx_req *req,
					  const dma_addr_t *addrs,
					  const unsigned int *lens,
					  unsigned int ngle)
{
	struct fun_dataop_gl *gle;
	unsigned int i;

	req->len8 = (sizeof(*req) + ngle * sizeof(*gle)) / 8;

	for (i = 0, gle = (struct fun_dataop_gl *)req->dataop.imm;
	     i < ngle && txq_to_end(q, gle); i++, gle++)
		fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);

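	/* The gather list ran past the end of the ring; continue writing the
	 * remaining entries from the start of the ring.
	 */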
	if (txq_to_end(q, gle) == 0) {
		gle = (struct fun_dataop_gl *)q->desc;
		for ( ; i < ngle; i++, gle++)
			fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
	}

	return gle;
}

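/* Return the 16-bit TCP header word containing the data offset and flags. */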
static __be16 tcp_hdr_doff_flags(const struct tcphdr *th)
{
	return *(__be16 *)&tcp_flag_word(th);
}

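/* Prepare a kTLS Tx packet. If its TCP sequence number matches the expected
 * next sequence of the offloaded TLS record stream it is passed through for
 * HW crypto; otherwise a resync may be requested and the packet falls back
 * to SW encryption.
 */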
static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
				  unsigned int *tls_len)
{
#if IS_ENABLED(CONFIG_TLS_DEVICE)
	const struct fun_ktls_tx_ctx *tls_ctx;
	u32 datalen, seq;

	datalen = skb->len - skb_tcp_all_headers(skb);
	if (!datalen)
		return skb;

	if (likely(!tls_offload_tx_resync_pending(skb->sk))) {
		seq = ntohl(tcp_hdr(skb)->seq);
		tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);

		if (likely(tls_ctx->next_seq == seq)) {
			*tls_len = datalen;
			return skb;
		}
		if (seq - tls_ctx->next_seq < U32_MAX / 4) {
			tls_offload_tx_resync_request(skb->sk, seq,
						      tls_ctx->next_seq);
		}
	}

	FUN_QSTAT_INC(q, tx_tls_fallback);
	skb = tls_encrypt_skb(skb);
	if (!skb)
		FUN_QSTAT_INC(q, tx_tls_drops);

	return skb;
#else
	return NULL;
#endif
}

/* Write as many descriptors as needed for the supplied skb starting at the
 * current producer location. The caller has made certain enough descriptors
 * are available.
 *
 * Returns the number of descriptors written, 0 on error.
 */
static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
				   unsigned int tls_len)
{
	unsigned int extra_bytes = 0, extra_pkts = 0;
	unsigned int idx = q->prod_cnt & q->mask;
	const struct skb_shared_info *shinfo;
	unsigned int lens[MAX_SKB_FRAGS + 1];
	dma_addr_t addrs[MAX_SKB_FRAGS + 1];
	struct fun_eth_tx_req *req;
	struct fun_dataop_gl *gle;
	const struct tcphdr *th;
	unsigned int l4_hlen;
	unsigned int ngle;
	u16 flags;

	shinfo = skb_shinfo(skb);
	if (unlikely(fun_map_pkt(q->dma_dev, shinfo, skb->data,
				 skb_headlen(skb), addrs, lens))) {
		FUN_QSTAT_INC(q, tx_map_err);
		return 0;
	}

	req = fun_tx_desc_addr(q, idx);
	req->op = FUN_ETH_OP_TX;
	req->len8 = 0;
	req->flags = 0;
	req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
	req->repr_idn = 0;
	req->encap_proto = 0;

	if (likely(shinfo->gso_size)) {
		if (skb->encapsulation) {
			u16 ol4_ofst;

			flags = FUN_ETH_OUTER_EN | FUN_ETH_INNER_LSO |
				FUN_ETH_UPDATE_INNER_L4_CKSUM |
				FUN_ETH_UPDATE_OUTER_L3_LEN;
			if (shinfo->gso_type & (SKB_GSO_UDP_TUNNEL |
						SKB_GSO_UDP_TUNNEL_CSUM)) {
				flags |= FUN_ETH_UPDATE_OUTER_L4_LEN |
					 FUN_ETH_OUTER_UDP;
				if (shinfo->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
					flags |= FUN_ETH_UPDATE_OUTER_L4_CKSUM;
				ol4_ofst = skb_transport_offset(skb);
			} else {
				ol4_ofst = skb_inner_network_offset(skb);
			}

			if (ip_hdr(skb)->version == 4)
				flags |= FUN_ETH_UPDATE_OUTER_L3_CKSUM;
			else
				flags |= FUN_ETH_OUTER_IPV6;

			if (skb->inner_network_header) {
				if (inner_ip_hdr(skb)->version == 4)
					flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM |
						 FUN_ETH_UPDATE_INNER_L3_LEN;
				else
					flags |= FUN_ETH_INNER_IPV6 |
						 FUN_ETH_UPDATE_INNER_L3_LEN;
			}
			th = inner_tcp_hdr(skb);
			l4_hlen = __tcp_hdrlen(th);
			fun_eth_offload_init(&req->offload, flags,
					     shinfo->gso_size,
					     tcp_hdr_doff_flags(th), 0,
					     skb_inner_network_offset(skb),
					     skb_inner_transport_offset(skb),
					     skb_network_offset(skb), ol4_ofst);
			FUN_QSTAT_INC(q, tx_encap_tso);
		} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
			flags = FUN_ETH_INNER_LSO | FUN_ETH_INNER_UDP |
				FUN_ETH_UPDATE_INNER_L4_CKSUM |
				FUN_ETH_UPDATE_INNER_L4_LEN |
				FUN_ETH_UPDATE_INNER_L3_LEN;

			if (ip_hdr(skb)->version == 4)
				flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
			else
				flags |= FUN_ETH_INNER_IPV6;

			l4_hlen = sizeof(struct udphdr);
			fun_eth_offload_init(&req->offload, flags,
					     shinfo->gso_size,
					     cpu_to_be16(l4_hlen << 10), 0,
					     skb_network_offset(skb),
					     skb_transport_offset(skb), 0, 0);
			FUN_QSTAT_INC(q, tx_uso);
		} else {
			/* HW considers one set of headers as inner */
			flags = FUN_ETH_INNER_LSO |
				FUN_ETH_UPDATE_INNER_L4_CKSUM |
				FUN_ETH_UPDATE_INNER_L3_LEN;
			if (shinfo->gso_type & SKB_GSO_TCPV6)
				flags |= FUN_ETH_INNER_IPV6;
			else
				flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
			th = tcp_hdr(skb);
			l4_hlen = __tcp_hdrlen(th);
			fun_eth_offload_init(&req->offload, flags,
					     shinfo->gso_size,
					     tcp_hdr_doff_flags(th), 0,
					     skb_network_offset(skb),
					     skb_transport_offset(skb), 0, 0);
			FUN_QSTAT_INC(q, tx_tso);
		}

		u64_stats_update_begin(&q->syncp);
		q->stats.tx_cso += shinfo->gso_segs;
		u64_stats_update_end(&q->syncp);

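		/* Account for the extra packets and replicated header bytes
		 * the HW will emit for the additional GSO segments.
		 */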
		extra_pkts = shinfo->gso_segs - 1;
		extra_bytes = (be16_to_cpu(req->offload.inner_l4_off) +
			       l4_hlen) * extra_pkts;
	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		flags = FUN_ETH_UPDATE_INNER_L4_CKSUM;
		if (skb->csum_offset == offsetof(struct udphdr, check))
			flags |= FUN_ETH_INNER_UDP;
		fun_eth_offload_init(&req->offload, flags, 0, 0, 0, 0,
				     skb_checksum_start_offset(skb), 0, 0);
		FUN_QSTAT_INC(q, tx_cso);
	} else {
		fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
	}

	ngle = shinfo->nr_frags + 1;
	req->dataop = FUN_DATAOP_HDR_INIT(ngle, 0, ngle, 0, skb->len);

	gle = fun_write_gl(q, req, addrs, lens, ngle);

	if (IS_ENABLED(CONFIG_TLS_DEVICE) && unlikely(tls_len)) {
		struct fun_eth_tls *tls = (struct fun_eth_tls *)gle;
		struct fun_ktls_tx_ctx *tls_ctx;

		req->len8 += FUNETH_TLS_SZ / 8;
		req->flags = cpu_to_be16(FUN_ETH_TX_TLS);

		tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
		tls->tlsid = tls_ctx->tlsid;
		tls_ctx->next_seq += tls_len;

		u64_stats_update_begin(&q->syncp);
		q->stats.tx_tls_bytes += tls_len;
		q->stats.tx_tls_pkts += 1 + extra_pkts;
		u64_stats_update_end(&q->syncp);
	}

	u64_stats_update_begin(&q->syncp);
	q->stats.tx_bytes += skb->len + extra_bytes;
	q->stats.tx_pkts += 1 + extra_pkts;
	u64_stats_update_end(&q->syncp);

	q->info[idx].skb = skb;

	trace_funeth_tx(q, skb->len, idx, req->dataop.ngather);
	return tx_req_ndesc(req);
}

/* Return the number of available descriptors of a Tx queue.
 * HW assumes head==tail means the ring is empty so we need to keep one
 * descriptor unused.
 */
static unsigned int fun_txq_avail(const struct funeth_txq *q)
{
	return q->mask - q->prod_cnt + q->cons_cnt;
}

/* Stop a queue if it can't handle another worst-case packet. */
static void fun_tx_check_stop(struct funeth_txq *q)
{
	if (likely(fun_txq_avail(q) >= FUNETH_MAX_PKT_DESC))
		return;

	netif_tx_stop_queue(q->ndq);

	/* NAPI reclaim is freeing packets in parallel with us and we may race.
	 * We have stopped the queue but check again after synchronizing with
	 * reclaim.
	 */
	smp_mb();
	if (likely(fun_txq_avail(q) < FUNETH_MAX_PKT_DESC))
		FUN_QSTAT_INC(q, tx_nstops);
	else
		netif_tx_start_queue(q->ndq);
}

/* Return true if a queue has enough space to restart. The current condition
 * is that at least a quarter of the ring must be free.
 */
static bool fun_txq_may_restart(struct funeth_txq *q)
{
	return fun_txq_avail(q) >= q->mask / 4;
}

netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	unsigned int qid = skb_get_queue_mapping(skb);
	struct funeth_txq *q = fp->txqs[qid];
	unsigned int tls_len = 0;
	unsigned int ndesc;

	if (tls_is_skb_tx_device_offloaded(skb)) {
		skb = fun_tls_tx(skb, q, &tls_len);
		if (unlikely(!skb))
			goto dropped;
	}

	ndesc = write_pkt_desc(skb, q, tls_len);
	if (unlikely(!ndesc)) {
		dev_kfree_skb_any(skb);
		goto dropped;
	}

	q->prod_cnt += ndesc;
	fun_tx_check_stop(q);

	skb_tx_timestamp(skb);

	if (__netdev_tx_sent_queue(q->ndq, skb->len, netdev_xmit_more()))
		fun_txq_wr_db(q);
	else
		FUN_QSTAT_INC(q, tx_more);

	return NETDEV_TX_OK;

dropped:
	/* A dropped packet may be the last one in an xmit_more train,
	 * so ring the doorbell just in case.
	 */
	if (!netdev_xmit_more())
		fun_txq_wr_db(q);
	return NETDEV_TX_OK;
}

/* Return a Tx queue's HW head index written back to host memory. */
static u16 txq_hw_head(const struct funeth_txq *q)
{
	return (u16)be64_to_cpu(*q->hw_wb);
}

/* Unmap the Tx packet starting at the given descriptor index and
 * return the number of Tx descriptors it occupied.
 */
static unsigned int fun_unmap_pkt(const struct funeth_txq *q, unsigned int idx)
{
	const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
	unsigned int ngle = req->dataop.ngather;
	struct fun_dataop_gl *gle;

	if (ngle) {
		gle = (struct fun_dataop_gl *)req->dataop.imm;
		dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data),
				 be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE);

		for (gle++; --ngle && txq_to_end(q, gle); gle++)
			dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
				       be32_to_cpu(gle->sgl_len),
				       DMA_TO_DEVICE);

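		/* Unmap any remaining entries of a gather list that wrapped
		 * to the start of the ring.
		 */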
		for (gle = (struct fun_dataop_gl *)q->desc; ngle; ngle--, gle++)
			dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
				       be32_to_cpu(gle->sgl_len),
				       DMA_TO_DEVICE);
	}

	return tx_req_ndesc(req);
}

/* Reclaim completed Tx descriptors and free their packets. Restart a stopped
 * queue if we freed enough descriptors.
 *
 * Return true if we exhausted the budget while there is more work to be done.
 */
static bool fun_txq_reclaim(struct funeth_txq *q, int budget)
{
	unsigned int npkts = 0, nbytes = 0, ndesc = 0;
	unsigned int head, limit, reclaim_idx;

	/* budget may be 0, e.g., netpoll */
	limit = budget ? budget : UINT_MAX;

	for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
	     head != reclaim_idx && npkts < limit; head = txq_hw_head(q)) {
		/* The HW head is continually updated, ensure we don't read
		 * descriptor state before the head tells us to reclaim it.
		 * On the enqueue side the doorbell is an implicit write
		 * barrier.
		 */
		rmb();

		do {
			unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);
			struct sk_buff *skb = q->info[reclaim_idx].skb;

			trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);

			nbytes += skb->len;
			napi_consume_skb(skb, budget);
			ndesc += pkt_desc;
			reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
			npkts++;
		} while (reclaim_idx != head && npkts < limit);
	}

	q->cons_cnt += ndesc;
	netdev_tx_completed_queue(q->ndq, npkts, nbytes);
	smp_mb(); /* pairs with the one in fun_tx_check_stop() */

	if (unlikely(netif_tx_queue_stopped(q->ndq) &&
		     fun_txq_may_restart(q))) {
		netif_tx_wake_queue(q->ndq);
		FUN_QSTAT_INC(q, tx_nrestarts);
	}

	return reclaim_idx != head;
}

/* The NAPI handler for Tx queues. */
int fun_txq_napi_poll(struct napi_struct *napi, int budget)
{
	struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
	struct funeth_txq *q = irq->txq;
	unsigned int db_val;

	if (fun_txq_reclaim(q, budget))
		return budget;               /* exhausted budget */

	napi_complete(napi);                 /* exhausted pending work */
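	/* Write the queue doorbell with the IRQ rearm value and the current
	 * consumer index.
	 */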
	db_val = READ_ONCE(q->irq_db_val) | (q->cons_cnt & q->mask);
	writel(db_val, q->db);
	return 0;
}

/* Reclaim up to @budget completed Tx packets from a TX XDP queue. */
static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
{
	unsigned int npkts = 0, ndesc = 0, head, reclaim_idx;

	for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
	     head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) {
		/* The HW head is continually updated, ensure we don't read
		 * descriptor state before the head tells us to reclaim it.
		 * On the enqueue side the doorbell is an implicit write
		 * barrier.
		 */
		rmb();

		do {
			unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);

			xdp_return_frame(q->info[reclaim_idx].xdpf);

			trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);

			reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
			ndesc += pkt_desc;
			npkts++;
		} while (reclaim_idx != head && npkts < budget);
	}

	q->cons_cnt += ndesc;
	return npkts;
}

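/* Enqueue an XDP frame for transmission. Returns true if the frame was
 * written to the queue, false if the queue was full or DMA mapping failed.
 */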
bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
{
	unsigned int idx, nfrags = 1, ndesc = 1, tot_len = xdpf->len;
	const struct skb_shared_info *si = NULL;
	unsigned int lens[MAX_SKB_FRAGS + 1];
	dma_addr_t dma[MAX_SKB_FRAGS + 1];
	struct fun_eth_tx_req *req;

	if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
		fun_xdpq_clean(q, FUN_XDP_CLEAN_BATCH);

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		si = xdp_get_shared_info_from_frame(xdpf);
		tot_len = xdp_get_frame_len(xdpf);
		nfrags += si->nr_frags;
		ndesc = DIV_ROUND_UP((sizeof(*req) + nfrags *
				      sizeof(struct fun_dataop_gl)),
				     FUNETH_SQE_SIZE);
	}

	if (unlikely(fun_txq_avail(q) < ndesc)) {
		FUN_QSTAT_INC(q, tx_xdp_full);
		return false;
	}

	if (unlikely(fun_map_pkt(q->dma_dev, si, xdpf->data, xdpf->len, dma,
				 lens))) {
		FUN_QSTAT_INC(q, tx_map_err);
		return false;
	}

	idx = q->prod_cnt & q->mask;
	req = fun_tx_desc_addr(q, idx);
	req->op = FUN_ETH_OP_TX;
	req->len8 = 0;
	req->flags = 0;
	req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
	req->repr_idn = 0;
	req->encap_proto = 0;
	fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
	req->dataop = FUN_DATAOP_HDR_INIT(nfrags, 0, nfrags, 0, tot_len);

	fun_write_gl(q, req, dma, lens, nfrags);

	q->info[idx].xdpf = xdpf;

	u64_stats_update_begin(&q->syncp);
	q->stats.tx_bytes += tot_len;
	q->stats.tx_pkts++;
	u64_stats_update_end(&q->syncp);

	trace_funeth_tx(q, tot_len, idx, nfrags);
	q->prod_cnt += ndesc;

	return true;
}

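/* ndo_xdp_xmit handler: queue up to @n XDP frames on the XDP Tx queue
 * associated with the current CPU. Returns the number of frames queued.
 */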
int fun_xdp_xmit_frames(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct funeth_priv *fp = netdev_priv(dev);
	struct funeth_txq *q, **xdpqs;
	int i, q_idx;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdpqs = rcu_dereference_bh(fp->xdpqs);
	if (unlikely(!xdpqs))
		return -ENETDOWN;

	q_idx = smp_processor_id();
	if (unlikely(q_idx >= fp->num_xdpqs))
		return -ENXIO;

	for (q = xdpqs[q_idx], i = 0; i < n; i++)
		if (!fun_xdp_tx(q, frames[i]))
			break;

	if (unlikely(flags & XDP_XMIT_FLUSH))
		fun_txq_wr_db(q);
	return i;
}

/* Purge a Tx queue of any queued packets. Should be called once HW access
 * to the packets has been revoked, e.g., after the queue has been disabled.
 */
static void fun_txq_purge(struct funeth_txq *q)
{
	while (q->cons_cnt != q->prod_cnt) {
		unsigned int idx = q->cons_cnt & q->mask;

		q->cons_cnt += fun_unmap_pkt(q, idx);
		dev_kfree_skb_any(q->info[idx].skb);
	}
	netdev_tx_reset_queue(q->ndq);
}

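/* As above, but for XDP Tx queues, which hold xdp_frames instead of skbs. */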
static void fun_xdpq_purge(struct funeth_txq *q)
{
	while (q->cons_cnt != q->prod_cnt) {
		unsigned int idx = q->cons_cnt & q->mask;

		q->cons_cnt += fun_unmap_pkt(q, idx);
		xdp_return_frame(q->info[idx].xdpf);
	}
}

/* Create a Tx queue, allocating all the host resources needed. */
static struct funeth_txq *fun_txq_create_sw(struct net_device *dev,
					    unsigned int qidx,
					    unsigned int ndesc,
					    struct fun_irq *irq)
{
	struct funeth_priv *fp = netdev_priv(dev);
	struct funeth_txq *q;
	int numa_node;

	if (irq)
		numa_node = fun_irq_node(irq); /* skb Tx queue */
	else
		numa_node = cpu_to_node(qidx); /* XDP Tx queue */

	q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
	if (!q)
		goto err;

	q->dma_dev = &fp->pdev->dev;
	q->desc = fun_alloc_ring_mem(q->dma_dev, ndesc, FUNETH_SQE_SIZE,
				     sizeof(*q->info), true, numa_node,
				     &q->dma_addr, (void **)&q->info,
				     &q->hw_wb);
	if (!q->desc)
		goto free_q;

	q->netdev = dev;
	q->mask = ndesc - 1;
	q->qidx = qidx;
	q->numa_node = numa_node;
	u64_stats_init(&q->syncp);
	q->init_state = FUN_QSTATE_INIT_SW;
	return q;

free_q:
	kfree(q);
err:
	netdev_err(dev, "Can't allocate memory for %s queue %u\n",
		   irq ? "Tx" : "XDP", qidx);
	return NULL;
}

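/* Free the host resources of a Tx queue, folding its final stats into the
 * netdev-wide counters.
 */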
static void fun_txq_free_sw(struct funeth_txq *q)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);

	fun_free_ring_mem(q->dma_dev, q->mask + 1, FUNETH_SQE_SIZE, true,
			  q->desc, q->dma_addr, q->info);

	fp->tx_packets += q->stats.tx_pkts;
	fp->tx_bytes   += q->stats.tx_bytes;
	fp->tx_dropped += q->stats.tx_map_err;

	kfree(q);
}

/* Allocate the device portion of a Tx queue. */
int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);
	unsigned int irq_idx, ndesc = q->mask + 1;
	int err;

	q->irq = irq;
	*q->hw_wb = 0;
	q->prod_cnt = 0;
	q->cons_cnt = 0;
	irq_idx = irq ? irq->irq_idx : 0;

	err = fun_sq_create(fp->fdev,
			    FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS |
			    FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR, 0,
			    FUN_HCI_ID_INVALID, ilog2(FUNETH_SQE_SIZE), ndesc,
			    q->dma_addr, fp->tx_coal_count, fp->tx_coal_usec,
			    irq_idx, 0, fp->fdev->kern_end_qid, 0,
			    &q->hw_qid, &q->db);
	if (err)
		goto out;

	err = fun_create_and_bind_tx(fp, q->hw_qid);
	if (err < 0)
		goto free_devq;
	q->ethid = err;

	if (irq) {
		irq->txq = q;
		q->ndq = netdev_get_tx_queue(q->netdev, q->qidx);
		q->irq_db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec,
					      fp->tx_coal_count);
		writel(q->irq_db_val, q->db);
	}

	q->init_state = FUN_QSTATE_INIT_FULL;
	netif_info(fp, ifup, q->netdev,
		   "%s queue %u, depth %u, HW qid %u, IRQ idx %u, eth id %u, node %d\n",
		   irq ? "Tx" : "XDP", q->qidx, ndesc, q->hw_qid, irq_idx,
		   q->ethid, q->numa_node);
	return 0;

free_devq:
	fun_destroy_sq(fp->fdev, q->hw_qid);
out:
	netdev_err(q->netdev,
		   "Failed to create %s queue %u on device, error %d\n",
		   irq ? "Tx" : "XDP", q->qidx, err);
	return err;
}

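/* Free the device resources of a Tx queue, returning it to the
 * FUN_QSTATE_INIT_SW state.
 */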
static void fun_txq_free_dev(struct funeth_txq *q)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);

	if (q->init_state < FUN_QSTATE_INIT_FULL)
		return;

	netif_info(fp, ifdown, q->netdev,
		   "Freeing %s queue %u (id %u), IRQ %u, ethid %u\n",
		   q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid,
		   q->irq ? q->irq->irq_idx : 0, q->ethid);

	fun_destroy_sq(fp->fdev, q->hw_qid);
	fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, q->ethid);

	if (q->irq) {
		q->irq->txq = NULL;
		fun_txq_purge(q);
	} else {
		fun_xdpq_purge(q);
	}

	q->init_state = FUN_QSTATE_INIT_SW;
}

/* Create or advance a Tx queue, allocating all the host and device resources
 * needed to reach the target state.
 */
int funeth_txq_create(struct net_device *dev, unsigned int qidx,
		      unsigned int ndesc, struct fun_irq *irq, int state,
		      struct funeth_txq **qp)
{
	struct funeth_txq *q = *qp;
	int err;

	if (!q)
		q = fun_txq_create_sw(dev, qidx, ndesc, irq);
	if (!q)
		return -ENOMEM;

	if (q->init_state >= state)
		goto out;

	err = fun_txq_create_dev(q, irq);
	if (err) {
		if (!*qp)
			fun_txq_free_sw(q);
		return err;
	}

out:
	*qp = q;
	return 0;
}

/* Free Tx queue resources until it reaches the target state.
 * The queue must be already disconnected from the stack.
 */
struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state)
{
	if (state < FUN_QSTATE_INIT_FULL)
		fun_txq_free_dev(q);

	if (state == FUN_QSTATE_DESTROYED) {
		fun_txq_free_sw(q);
		q = NULL;
	}

	return q;
}